/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

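	/*
	 * In overwrite mode, one extra subbuffer is allocated so the
	 * reader can own a full subbuffer while the writer keeps
	 * producing into the remaining ones.
	 */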
	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf;	/* Add pages for reader */
		num_subbuf_alloc++;
	}

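	/*
	 * Allocations below are node-local to the buffer's CPU. Global
	 * buffers use cpu == -1, hence the clamp to CPU 0's node.
	 */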
	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					  * num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

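	/*
	 * Each id word packs a backend array index (see
	 * subbuffer_id_get_index()) and a "noref" flag (see
	 * subbuffer_id_is_noref()); initially, write-side entry i simply
	 * points at backend slot i, with noref set.
	 */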
	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

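	/*
	 * Error paths below unwind in reverse allocation order; the pages
	 * scratch array itself is always freed last.
	 */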
free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

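	/*
	 * Restore the initial write-side and read-side subbuffer ids set
	 * up by lib_ring_buffer_backend_allocate().
	 */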
	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will
		 * happen when tracing is stopped, or will be done by
		 * switch timer CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

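	/*
	 * Per-cpu buffers get one struct lib_ring_buffer per possible CPU,
	 * created either here or from the hotplug callback; a global
	 * buffer is a single instance created with cpu == -1.
	 */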
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend's buffer
 * structures.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * _lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

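	/*
	 * On entry, the caller's fast path has already copied the first
	 * pagecpy bytes. Each iteration consumes them, then copies up to
	 * the next page boundary, until the remainder fits in one page.
	 */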
	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

/**
 * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already written on the current page
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * _lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

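	/*
	 * Copies the string up to its terminator (at most len - 1 bytes),
	 * fills the remainder of the requested length with the pad
	 * character, and always terminates the destination with '\0'.
	 */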
	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * _lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the remainder. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break;	/* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * _lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied on the current page
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns a pointer to the page frame number (unsigned long).
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);