/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need an extra subbuffer for the reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

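	/*
	 * size and num_subbuf are powers of 2 (validated by
	 * channel_backend_init()), so shifting by
	 * get_count_order(num_subbuf) splits the buffer pages evenly
	 * across sub-buffers.
	 */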
	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

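	/*
	 * In overwrite mode, one extra sub-buffer is allocated so the
	 * reader can own a sub-buffer outright while the writer keeps
	 * producing into the others.
	 */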
	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					* num_subbuf_alloc,
				1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

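	/*
	 * Each write-side table entry packs the index of the backend
	 * pages entry backing the logical sub-buffer together with a
	 * "noref" flag; start with the identity mapping.
	 */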
	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

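	/*
	 * Assign read-side subbuffer table: the reader initially owns
	 * the extra sub-buffer (the last one allocated) when it exists;
	 * otherwise it starts out sharing sub-buffer 0 with the writer.
	 */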
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * the switch timer CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because it would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will
		 * happen when tracing is stopped, or will be done by the
		 * switch timer CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * On kernels without CPU hotplug support, if the
			 * ring buffer is allocated in an early initcall, it
			 * will not be notified of secondary cpus. In that
			 * case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * _lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

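	/*
	 * The caller's fast path has already copied the first pagecpy
	 * bytes; resume right after them and keep copying page by page
	 * within the current sub-buffer.
	 */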
	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

/**
 * _lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already copied
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

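	/*
	 * Same page-by-page slow path as _lib_ring_buffer_write(), but
	 * filling with the constant byte c instead of copying from a
	 * source buffer.
	 */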
	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * _lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

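	/*
	 * Copy up to len - 1 bytes of the source string, pad with the
	 * pad character once the source terminates, and always end the
	 * len-byte region with a final '\0'.
	 */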
	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * _lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 *
 * This function deals with userspace pointers; it should never be
 * called without the src pointer having been checked with access_ok()
 * first.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the rest of the record. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * _lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be
 * called without the src pointer having been checked with access_ok()
 * first.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

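	/*
	 * Userspace variant of _lib_ring_buffer_strcpy(): copies with
	 * the inatomic user accessor, and switches to padding as soon as
	 * the source string terminates or a page fault is encountered.
	 */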
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to
 * calling this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

961 | ||
962 | /** | |
963 | * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer. | |
964 | * @bufb : buffer backend | |
965 | * @offset : offset within the buffer | |
966 | * @dest : destination address | |
967 | * @len : destination's length | |
968 | * | |
61eb4c39 | 969 | * Return string's length, or -EINVAL on error. |
f3bc08c5 | 970 | * Should be protected by get_subbuf/put_subbuf. |
61eb4c39 | 971 | * Destination length should be at least 1 to hold '\0'. |
f3bc08c5 MD |
972 | */ |
973 | int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset, | |
974 | void *dest, size_t len) | |
975 | { | |
976 | struct channel_backend *chanb = &bufb->chan->backend; | |
5a8fd222 | 977 | const struct lib_ring_buffer_config *config = &chanb->config; |
f3bc08c5 MD |
978 | size_t index; |
979 | ssize_t pagecpy, pagelen, strpagelen, orig_offset; | |
980 | char *str; | |
981 | struct lib_ring_buffer_backend_pages *rpages; | |
982 | unsigned long sb_bindex, id; | |
983 | ||
984 | offset &= chanb->buf_size - 1; | |
985 | index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT; | |
986 | orig_offset = offset; | |
61eb4c39 MD |
987 | if (unlikely(!len)) |
988 | return -EINVAL; | |
f3bc08c5 MD |
989 | for (;;) { |
990 | id = bufb->buf_rsb.id; | |
991 | sb_bindex = subbuffer_id_get_index(config, id); | |
992 | rpages = bufb->array[sb_bindex]; | |
993 | CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE | |
994 | && subbuffer_id_is_noref(config, id)); | |
995 | str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK); | |
996 | pagelen = PAGE_SIZE - (offset & ~PAGE_MASK); | |
997 | strpagelen = strnlen(str, pagelen); | |
998 | if (len) { | |
999 | pagecpy = min_t(size_t, len, strpagelen); | |
1000 | if (dest) { | |
1001 | memcpy(dest, str, pagecpy); | |
1002 | dest += pagecpy; | |
1003 | } | |
1004 | len -= pagecpy; | |
1005 | } | |
1006 | offset += strpagelen; | |
1007 | index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT; | |
1008 | if (strpagelen < pagelen) | |
1009 | break; | |
1010 | /* | |
1011 | * Underlying layer should never ask for reads across | |
1012 | * subbuffers. | |
1013 | */ | |
1014 | CHAN_WARN_ON(chanb, offset >= chanb->buf_size); | |
1015 | } | |
1016 | if (dest && len) | |
1017 | ((char *)dest)[0] = 0; | |
1018 | return offset - orig_offset; | |
1019 | } | |
1020 | EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr); | |
1021 | ||
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);