unsigned long subbuf_size, mmap_offset = 0;
unsigned long num_subbuf_alloc;
struct page **pages;
- void **virt;
unsigned long i;
num_pages = size >> PAGE_SHIFT;
if (unlikely(!pages))
goto pages_error;
- virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
- 1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
- if (unlikely(!virt))
- goto virt_error;
-
bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
* num_subbuf_alloc,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_ZERO, 0);
if (unlikely(!pages[i]))
goto depopulate;
- virt[i] = page_address(pages[i]);
}
bufb->num_pages_per_subbuf = num_pages_per_subbuf;
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < num_pages_per_subbuf; j++) {
CHAN_WARN_ON(chanb, page_idx > num_pages);
- bufb->array[i]->p[j].virt = virt[page_idx];
- bufb->array[i]->p[j].page = pages[page_idx];
+ bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
+ bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
page_idx++;
}
if (config->output == RING_BUFFER_MMAP) {
/*
 * If kmalloc ever uses vmalloc underneath, make sure buffer pages
 * will not fault.
*/
wrapper_vmalloc_sync_all();
- kfree(virt);
kfree(pages);
return 0;
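
For reference, the per-page descriptor these hunks operate on now carries a pfn in place of a struct page pointer. A minimal sketch of the layout, inferred from the field accesses above (`p[j].virt`, `p[j].pfn`); the authoritative definition lives in the backend type header:

	struct lib_ring_buffer_backend_page {
		void *virt;		/* cached kernel virtual address of the page */
		unsigned long pfn;	/* page frame number; replaces 'struct page *page' */
	};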
__free_page(pages[i]);
kfree(bufb->array);
array_error:
- kfree(virt);
-virt_error:
kfree(pages);
pages_error:
return -ENOMEM;
kfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
- __free_page(bufb->array[i]->p[j].page);
+ __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
kfree(bufb->array[i]);
}
kfree(bufb->array);
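
Allocation and teardown stay symmetric through the pfn round-trip: page_to_pfn() when the descriptor is filled, pfn_to_page() before __free_page(). A minimal sketch of the invariant, assuming order-0 GFP_KERNEL (lowmem) pages as allocated above:

	#include <linux/mm.h>
	#include <linux/gfp.h>

	/* Sketch only: the page <-> pfn round-trip this patch relies on. */
	static void pfn_roundtrip_sketch(void)
	{
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		unsigned long pfn;

		if (!page)
			return;
		pfn = page_to_pfn(page);
		/* pfn_to_page(page_to_pfn(p)) == p: the pfn alone identifies the page. */
		WARN_ON(pfn_to_page(pfn) != page);
		/*
		 * GFP_KERNEL pages are lowmem-mapped, so page_address() is always
		 * valid here, which is why the temporary virt[] scratch array could
		 * be dropped in favor of calling page_address() directly.
		 */
		WARN_ON(page_address(page) == NULL);
		__free_page(page);
	}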
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
/**
- * lib_ring_buffer_read_get_page - Get a whole page to read from
+ * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
* @bufb : buffer backend
* @offset : offset within the buffer
* @virt : pointer to page address (output)
*
* Should be protected by get_subbuf/put_subbuf.
- * Returns the pointer to the page struct pointer.
+ * Returns a pointer to the page frame number (an unsigned long).
*/
-struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
+unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
size_t offset, void ***virt)
{
size_t index;
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
*virt = &rpages->p[index].virt;
- return &rpages->p[index].page;
+ return &rpages->p[index].pfn;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
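
A hedged usage sketch for the renamed accessor; rb_read_byte() below is hypothetical, only lib_ring_buffer_read_get_pfn() comes from this patch. Ordinary readers go through the cached virtual address; the pfn exists for callers like the mmap and splice paths that need the struct page back:

	/*
	 * Hypothetical caller: read one byte at a read-side offset through
	 * the renamed accessor.
	 */
	static int rb_read_byte(struct lib_ring_buffer_backend *bufb,
				size_t offset, unsigned char *out)
	{
		unsigned long *pfnp;
		void **virt;

		pfnp = lib_ring_buffer_read_get_pfn(bufb, offset, &virt);
		if (!*pfnp)	/* pfn 0 doubles as "no page", as in the fault path */
			return -EINVAL;
		*out = ((unsigned char *)*virt)[offset & ~PAGE_MASK];
		return 0;
	}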
/**
* lib_ring_buffer_read_offset_address - get address of a buffer location
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
pgoff_t pgoff = vmf->pgoff;
- struct page **page;
+ unsigned long *pfnp;
void **virt;
unsigned long offset, sb_bindex;
buf->backend.chan->backend.subbuf_size))
return VM_FAULT_SIGBUS;
/*
- * ring_buffer_read_get_page() gets the page in the current reader's
- * pages.
+ * ring_buffer_read_get_pfn() gets the page frame number for the
+ * current reader's pages.
*/
- page = lib_ring_buffer_read_get_page(&buf->backend, offset, &virt);
- if (!*page)
+ pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
+ if (!*pfnp)
return VM_FAULT_SIGBUS;
- get_page(*page);
- vmf->page = *page;
+ get_page(pfn_to_page(*pfnp));
+ vmf->page = pfn_to_page(*pfnp);
return 0;
}
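
A behavior-neutral tightening the fault path could take, if desired: translate the pfn once and reuse the struct page pointer instead of calling pfn_to_page() twice. Sketch only:

	#include <linux/mm.h>

	/* Equivalent of the tail of the fault handler above. */
	static int rb_fault_tail(struct vm_fault *vmf, unsigned long pfn)
	{
		struct page *page = pfn_to_page(pfn);

		get_page(page);		/* the core VM releases this with put_page() */
		vmf->page = page;
		return 0;
	}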
for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
unsigned int this_len;
- struct page **page, *new_page;
+ unsigned long *pfnp, new_pfn;
+ struct page *new_page;
void **virt;
if (!len)
GFP_KERNEL | __GFP_ZERO, 0);
if (!new_page)
break;
-
+ new_pfn = page_to_pfn(new_page);
this_len = PAGE_SIZE - poff;
- page = lib_ring_buffer_read_get_page(&buf->backend, roffset, &virt);
- spd.pages[spd.nr_pages] = *page;
- *page = new_page;
+ pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
+ spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
+ *pfnp = new_pfn;
*virt = page_address(new_page);
spd.partial[spd.nr_pages].offset = poff;
spd.partial[spd.nr_pages].len = this_len;
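
The splice path steals the current reader page for the pipe and installs the freshly zeroed page in its place; the pfn and the cached virtual address must be updated as a pair or the descriptor goes incoherent. A minimal sketch of that swap under the same assumptions (rb_swap_reader_page() is a hypothetical name, not part of the patch):

	#include <linux/mm.h>

	/*
	 * Hypothetical helper mirroring the swap in the splice hunk: hand the
	 * current reader page to the pipe and install the fresh page, keeping
	 * the pfn and cached vaddr coherent.
	 */
	static struct page *rb_swap_reader_page(unsigned long *pfnp, void **virt,
						struct page *new_page)
	{
		struct page *old_page = pfn_to_page(*pfnp);

		*pfnp = page_to_pfn(new_page);
		*virt = page_address(new_page);	/* must stay in sync with the pfn */
		return old_page;		/* now owned by the splice consumer */
	}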