/*
 * Public API and common code for kernel->userspace relay file support.
 *
 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Moved to kernel/relay.c by Paul Mundt, 2006.
 * November 2006 - CPU hotplug support by Mathieu Desnoyers
 *	(mathieu.desnoyers@polymtl.ca)
 *
 * This file is released under the GPL.
 */
//ust// #include <linux/errno.h>
//ust// #include <linux/stddef.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/vmalloc.h>
//ust// #include <linux/mm.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/splice.h>
//ust// #include <linux/bitops.h>

/* The SysV shared memory calls used below (shmget/shmat/shmctl), getpid(),
 * munmap() and malloc()/free() need these userspace headers. */
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdlib.h>

#include "kernelcompat.h"
#include "tracercore.h"
/* list of open channels, for cpu hotplug */
static DEFINE_MUTEX(relay_channels_mutex);
static LIST_HEAD(relay_channels);

static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf);
/**
 *	relay_alloc_buf - allocate a channel buffer
 *	@buf: the buffer struct
 *	@size: total size of the buffer
 */
//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
//ust// {
//ust// 	unsigned int i, n_pages;
//ust// 	struct buf_page *buf_page, *n;
//ust//
//ust// 	*size = PAGE_ALIGN(*size);
//ust// 	n_pages = *size >> PAGE_SHIFT;
//ust//
//ust// 	INIT_LIST_HEAD(&buf->pages);
//ust//
//ust// 	for (i = 0; i < n_pages; i++) {
//ust// 		buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
//ust// 			cpu_to_node(buf->cpu));
//ust// 		if (unlikely(!buf_page))
//ust// 			goto depopulate;
//ust// 		buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
//ust// 			GFP_KERNEL | __GFP_ZERO, 0);
//ust// 		if (unlikely(!buf_page->page)) {
//ust// 			kfree(buf_page);
//ust// 			goto depopulate;
//ust// 		}
//ust// 		list_add_tail(&buf_page->list, &buf->pages);
//ust// 		buf_page->offset = (size_t)i << PAGE_SHIFT;
//ust// 		buf_page->buf = buf;
//ust// 		set_page_private(buf_page->page, (unsigned long)buf_page);
//ust// 		if (i == 0) {
//ust// 			buf->wpage = buf_page;
//ust// 			buf->hpage[0] = buf_page;
//ust// 			buf->hpage[1] = buf_page;
//ust// 			buf->rpage = buf_page;
//ust// 		}
//ust// 	}
//ust// 	buf->page_count = n_pages;
//ust// 	return 0;
//ust//
//ust// depopulate:
//ust// 	list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
//ust// 		list_del_init(&buf_page->list);
//ust// 		__free_page(buf_page->page);
//ust// 		kfree(buf_page);
//ust// 	}
//ust// 	return -ENOMEM;
//ust// }
static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
	struct buf_page *buf_page, *n;
	void *ptr;
	int result;
	int shmid;

	*size = PAGE_ALIGN(*size);

	result = shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if (result == -1) {
		perror("shmget");
		return -1;
	}

	ptr = shmat(shmid, NULL, 0);
	if (ptr == (void *) -1) {
		perror("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(shmid, IPC_RMID, NULL);
	if (result == -1) {
		perror("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(shmid, IPC_RMID, NULL);
	if (result == -1)
		perror("shmctl");

	return -1;
}
/**
 *	relay_create_buf - allocate and initialize a channel buffer
 *	@chan: the relay channel
 *
 *	Returns channel buffer if successful, %NULL otherwise.
 */
static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
	int ret;
	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->chan = chan;
	ret = relay_alloc_buf(buf, &chan->alloc_size);
	if (ret)
		goto free_buf;

	kref_get(&buf->chan->kref);
	return buf;

free_buf:
	kfree(buf);
	return NULL;
}
/**
 *	relay_destroy_channel - free the channel struct
 *	@kref: target kernel reference that contains the relay channel
 *
 *	Should only be called from kref_put().
 */
static void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	kfree(chan);
}
/**
 *	relay_destroy_buf - destroy an rchan_buf struct and associated buffer
 *	@buf: the buffer struct
 */
static void relay_destroy_buf(struct rchan_buf *buf)
{
	struct rchan *chan = buf->chan;
	struct buf_page *buf_page, *n;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if (result == -1)
		perror("munmap");

//ust//	chan->buf[buf->cpu] = NULL;
	kfree(buf);
	kref_put(&chan->kref, relay_destroy_channel);
}
/**
 *	relay_remove_buf - remove a channel buffer
 *	@kref: target kernel reference that contains the relay buffer
 *
 *	Removes the file from the filesystem, which also frees the
 *	rchan_buf_struct and the channel buffer. Should only be called from
 *	kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
//ust//	buf->chan->cb->remove_buf_file(buf);
	relay_destroy_buf(buf);
}
/*
 * High-level relay kernel API and associated functions.
 */

/*
 * rchan_callback implementations defining default channel behavior. Used
 * in place of corresponding NULL values in client callback struct.
 */

/*
 * create_buf_file() default callback. Does nothing.
 */
static struct dentry *create_buf_file_default_callback(const char *filename,
						       struct dentry *parent,
						       int mode,
						       struct rchan_buf *buf)
{
	return NULL;
}
/*
 * remove_buf_file() default callback. Does nothing.
 */
static int remove_buf_file_default_callback(struct dentry *dentry)
{
	return -EINVAL;
}
/**
 *	wakeup_readers - wake up readers waiting on a channel
 *	@data: contains the channel buffer
 *
 *	This is the timer function used to defer reader waking.
 */
//ust// static void wakeup_readers(unsigned long data)
//ust// {
//ust// 	struct rchan_buf *buf = (struct rchan_buf *)data;
//ust// 	wake_up_interruptible(&buf->read_wait);
//ust// }
/**
 *	__relay_reset - reset a channel buffer
 *	@buf: the channel buffer
 *	@init: 1 if this is a first-time initialization
 *
 *	See relay_reset() for description of effect.
 */
static void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
	if (init) {
//ust//		init_waitqueue_head(&buf->read_wait);
		kref_init(&buf->kref);
//ust//		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
	} else {
//ust//		del_timer_sync(&buf->timer);
	}

	buf->finalized = 0;
}
/*
 *	relay_open_buf - create a new relay channel buffer
 *
 *	used by relay_open() and CPU hotplug.
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan)
{
	struct rchan_buf *buf = NULL;
	struct dentry *dentry;
//ust//	char *tmpname;

//ust//	tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
//ust//	if (!tmpname)
//ust//		goto end;
//ust//	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);

	buf = relay_create_buf(chan);
	if (!buf)
		goto free_name;

	__relay_reset(buf, 1);

	/* Create file in fs */
//ust//	dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
//ust//					   buf);
//ust//	if (!dentry)
//ust//		goto free_buf;
//ust//	buf->dentry = dentry;

	ltt_create_buf_file_callback(buf); /* ust */

	goto free_name;

free_buf:
	relay_destroy_buf(buf);
	buf = NULL;
free_name:
//ust//	kfree(tmpname);
//ust// end:
	return buf;
}
/**
 *	relay_close_buf - close a channel buffer
 *	@buf: channel buffer
 *
 *	Marks the buffer finalized and restores the default callbacks.
 *	The channel buffer and channel buffer data structure are then freed
 *	automatically when the last reference is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
//ust//	del_timer_sync(&buf->timer);
	kref_put(&buf->kref, relay_remove_buf);
}
//ust// static void setup_callbacks(struct rchan *chan,
//ust// 			    struct rchan_callbacks *cb)
//ust// {
//ust// 	if (!cb) {
//ust// 		chan->cb = &default_channel_callbacks;
//ust// 		return;
//ust// 	}
//ust//
//ust// 	if (!cb->create_buf_file)
//ust// 		cb->create_buf_file = create_buf_file_default_callback;
//ust// 	if (!cb->remove_buf_file)
//ust// 		cb->remove_buf_file = remove_buf_file_default_callback;
//ust// 	chan->cb = cb;
//ust// }
/**
 *	relay_hotcpu_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
//ust// 				unsigned long action,
//ust// 				void *hcpu)
//ust// {
//ust// 	unsigned int hotcpu = (unsigned long)hcpu;
//ust// 	struct rchan *chan;
//ust//
//ust// 	switch (action) {
//ust// 	case CPU_UP_PREPARE:
//ust// 	case CPU_UP_PREPARE_FROZEN:
//ust// 		mutex_lock(&relay_channels_mutex);
//ust// 		list_for_each_entry(chan, &relay_channels, list) {
//ust// 			if (chan->buf[hotcpu])
//ust// 				continue;
//ust// 			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
//ust// 			if (!chan->buf[hotcpu]) {
//ust// 				printk(KERN_ERR
//ust// 					"relay_hotcpu_callback: cpu %d buffer "
//ust// 					"creation failed\n", hotcpu);
//ust// 				mutex_unlock(&relay_channels_mutex);
//ust// 				return NOTIFY_BAD;
//ust// 			}
//ust// 		}
//ust// 		mutex_unlock(&relay_channels_mutex);
//ust// 		break;
//ust// 	case CPU_DEAD:
//ust// 	case CPU_DEAD_FROZEN:
//ust// 		/* No need to flush the cpu : will be flushed upon
//ust// 		 * final relay_flush() call. */
//ust// 		break;
//ust// 	}
//ust// 	return NOTIFY_OK;
//ust// }
/**
 *	ltt_relay_open - create a new relay channel
 *	@base_filename: base name of files to create
 *	@parent: dentry of parent directory, %NULL for root directory
 *	@subbuf_size: size of sub-buffers
 *	@n_subbufs: number of sub-buffers
 *	@cb: client callback functions
 *	@private_data: user-defined data
 *
 *	Returns channel pointer if successful, %NULL otherwise.
 *
 *	Creates a channel buffer for each cpu using the sizes and
 *	attributes specified. The created channel buffer files
 *	will be named base_filename0...base_filenameN-1. File
 *	permissions will be %S_IRUSR.
 */
struct rchan *ltt_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     void *private_data)
{
	struct rchan *chan;

//ust//	if (!base_filename)
//ust//		return NULL;

	if (!(subbuf_size && n_subbufs))
		return NULL;

	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->version = LTT_RELAY_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
	chan->parent = parent;
	chan->private_data = private_data;
//ust//	strlcpy(chan->base_filename, base_filename, NAME_MAX);
//ust//	setup_callbacks(chan, cb);
	kref_init(&chan->kref);

	mutex_lock(&relay_channels_mutex);
//ust//	for_each_online_cpu(i) {
	chan->buf = relay_open_buf(chan);
	if (!chan->buf)
		goto error;
//ust//	}
	list_add(&chan->list, &relay_channels);
	mutex_unlock(&relay_channels_mutex);

	return chan;

//ust// free_bufs:
//ust//	for_each_possible_cpu(i) {
//ust//		if (!chan->buf[i])
//ust//			break;
//ust//		relay_close_buf(chan->buf[i]);
//ust//	}

error:
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_open);
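/*
 * Illustrative sketch (not part of the original code) : a minimal open/close
 * cycle for a channel, assuming the five-argument form of ltt_relay_open()
 * used above. The sizes are hypothetical; both subbuf_size and n_subbufs
 * must be non-zero or ltt_relay_open() returns NULL.
 */
#if 0
static void example_channel_lifetime(void *private_data)
{
	struct rchan *chan;

	chan = ltt_relay_open("example", NULL, 4096, 8, private_data);
	if (!chan)
		return;
	/* ... trace into chan->buf ... */
	ltt_relay_close(chan);
}
#endif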
/**
 *	ltt_relay_close - close the channel
 *	@chan: the channel to close
 *
 *	Closes all channel buffers and frees the channel.
 */
void ltt_relay_close(struct rchan *chan)
{
	if (!chan)
		return;

	mutex_lock(&relay_channels_mutex);
//ust//	for_each_possible_cpu(i)
	if (chan->buf)
		relay_close_buf(chan->buf);

	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_close);
/*
 * Start iteration at the previous element. Skip the real list head.
 */
//ust// struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
//ust// 	struct buf_page *page, size_t offset, ssize_t diff_offset)
//ust// {
//ust// 	struct buf_page *iter;
//ust// 	size_t orig_iter_off;
//ust// 	unsigned int i = 0;
//ust//
//ust// 	orig_iter_off = page->offset;
//ust// 	list_for_each_entry_reverse(iter, &page->list, list) {
//ust// 		/*
//ust// 		 * Skip the real list head.
//ust// 		 */
//ust// 		if (&iter->list == &buf->pages)
//ust// 			continue;
//ust// 		i++;
//ust// 		if (offset >= iter->offset
//ust// 		    && offset < iter->offset + PAGE_SIZE) {
//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
//ust// 			if (i > 1) {
//ust// 				printk(KERN_WARNING
//ust// 					"Backward random access detected in "
//ust// 					"ltt_relay. Iterations %u, "
//ust// 					"offset %zu, orig iter->off %zu, "
//ust// 					"iter->off %zu diff_offset %zd.\n", i,
//ust// 					offset, orig_iter_off, iter->offset,
//ust// 					diff_offset);
//ust// 			}
//ust// #endif
//ust// 			return iter;
//ust// 		}
//ust// 	}
//ust// 	return NULL;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
/*
 * Start iteration at the next element. Skip the real list head.
 */
//ust// struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
//ust// 	struct buf_page *page, size_t offset, ssize_t diff_offset)
//ust// {
//ust// 	struct buf_page *iter;
//ust// 	unsigned int i = 0;
//ust// 	size_t orig_iter_off;
//ust//
//ust// 	orig_iter_off = page->offset;
//ust// 	list_for_each_entry(iter, &page->list, list) {
//ust// 		/*
//ust// 		 * Skip the real list head.
//ust// 		 */
//ust// 		if (&iter->list == &buf->pages)
//ust// 			continue;
//ust// 		i++;
//ust// 		if (offset >= iter->offset
//ust// 		    && offset < iter->offset + PAGE_SIZE) {
//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
//ust// 			if (i > 1) {
//ust// 				printk(KERN_WARNING
//ust// 					"Forward random access detected in "
//ust// 					"ltt_relay. Iterations %u, "
//ust// 					"offset %zu, orig iter->off %zu, "
//ust// 					"iter->off %zu diff_offset %zd.\n", i,
//ust// 					offset, orig_iter_off, iter->offset,
//ust// 					diff_offset);
//ust// 			}
//ust// #endif
//ust// 			return iter;
//ust// 		}
//ust// 	}
//ust// 	return NULL;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
/**
 * _ltt_relay_write - write data to a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @cpy : length already copied by the fast path
 */
void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
//ust// EXPORT_SYMBOL_GPL(_ltt_relay_write);
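/*
 * Illustrative sketch (not part of the original code) : a caller-side view
 * of how the slow path above finishes a copy that a fast path could not
 * complete in one shot. The function name example_slow_path_write is
 * hypothetical.
 */
#if 0
static void example_slow_path_write(struct rchan_buf *buf, size_t offset,
		const void *src, size_t len)
{
	/* First chunk : up to the end of the flat buffer. */
	ssize_t cpy = min_t(size_t, len, buf->buf_size - offset);

	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
	/* Remainder, if any, is handled by the loop in _ltt_relay_write(). */
	if (unlikely(len != cpy))
		_ltt_relay_write(buf, offset, src, len, cpy);
}
#endif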
/**
 * ltt_relay_read - read data from ltt_relay_buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to read
 */
//ust// int ltt_relay_read(struct rchan_buf *buf, size_t offset,
//ust// 	void *dest, size_t len)
//ust// {
//ust// 	struct buf_page *page;
//ust// 	ssize_t pagecpy, orig_len;
//ust//
//ust// 	orig_len = len;
//ust// 	offset &= buf->chan->alloc_size - 1;
//ust// 	page = buf->rpage;
//ust// 	if (unlikely(!len))
//ust// 		return 0;
//ust// 	for (;;) {
//ust// 		page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
//ust// 		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust// 		memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
//ust// 			pagecpy);
//ust// 		len -= pagecpy;
//ust// 		if (likely(!len))
//ust// 			break;
//ust// 		dest += pagecpy;
//ust// 		offset += pagecpy;
//ust// 		/*
//ust// 		 * Underlying layer should never ask for reads across
//ust// 		 * subbuffers.
//ust// 		 */
//ust// 		WARN_ON(offset >= buf->chan->alloc_size);
//ust// 	}
//ust// 	return orig_len;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_read);
/**
 * ltt_relay_read_get_page - Get a whole page to read from
 * @buf : buffer
 * @offset : offset within the buffer
 */
//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
//ust// {
//ust// 	struct buf_page *page;
//ust//
//ust// 	offset &= buf->chan->alloc_size - 1;
//ust// 	page = buf->rpage;
//ust// 	page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
//ust// 	return page;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
/**
 * ltt_relay_offset_address - get address of a location within the buffer
 * @buf : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
{
//ust//	struct buf_page *page;
//ust//	unsigned int odd;
//ust//
//ust//	offset &= buf->chan->alloc_size - 1;
//ust//	odd = !!(offset & buf->chan->subbuf_size);
//ust//	page = buf->hpage[odd];
//ust//	if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
//ust//		buf->hpage[odd] = page = buf->wpage;
//ust//	page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
//ust//	return page_address(page->page) + (offset & ~PAGE_MASK);
	return ((char *)buf->buf_data) + offset;
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
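/*
 * Illustrative sketch (not part of the original code) : typical use of
 * ltt_relay_offset_address() to reach a subbuffer header, as done by the
 * begin/end callbacks below. The helper name is hypothetical.
 */
#if 0
static struct ltt_subbuffer_header *example_subbuf_header(
		struct rchan_buf *buf, unsigned int subbuf_idx)
{
	/* Subbuffer idx * subbuffer size gives the header offset. */
	return (struct ltt_subbuffer_header *)
		ltt_relay_offset_address(buf,
			subbuf_idx * buf->chan->subbuf_size);
}
#endif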
/**
 *	relay_file_open - open file op for relay files
 *	@inode: the inode
 *	@filp: the file
 *
 *	Increments the channel buffer refcount.
 */
//ust// static int relay_file_open(struct inode *inode, struct file *filp)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	kref_get(&buf->kref);
//ust// 	filp->private_data = buf;
//ust//
//ust// 	return nonseekable_open(inode, filp);
//ust// }

/**
 *	relay_file_release - release file op for relay files
 *	@inode: the inode
 *	@filp: the file
 *
 *	Decrements the channel refcount, as the filesystem is
 *	no longer using it.
 */
//ust// static int relay_file_release(struct inode *inode, struct file *filp)
//ust// {
//ust// 	struct rchan_buf *buf = filp->private_data;
//ust// 	kref_put(&buf->kref, relay_remove_buf);
//ust//
//ust// 	return 0;
//ust// }

//ust// const struct file_operations ltt_relay_file_operations = {
//ust// 	.open		= relay_file_open,
//ust// 	.release	= relay_file_release,
//ust// };
//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
//ust// static __init int relay_init(void)
//ust// {
//ust// 	hotcpu_notifier(relay_hotcpu_callback, 5);
//ust// 	return 0;
//ust// }
//ust// module_init(relay_init);
/*
 * (C) Copyright 2005-2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng lockless buffer space management (reader/writer).
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *	Karim Yaghmour (karim@opersys.com)
 *	Tom Zanussi (zanussi@us.ibm.com)
 *	Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *	Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *	19/10/05, Complete lockless mechanism.
 *	27/05/05, Modular redesign and rewrite.
 *
 * Userspace reader semantic :
 * while (poll fd != POLLHUP) {
 *	- ioctl RELAY_GET_SUBBUF_SIZE
 *	while (get_subbuf not ready) {
 *		- ioctl GET_SUBBUF, return value : subbuffer index
 *	}
 *	- splice 1 subbuffer worth of data to a pipe
 *	- splice the data from pipe to disk/network
 *	- ioctl PUT_SUBBUF, check error value
 *	  if err val < 0, previous subbuffer was corrupted.
 * }
 */
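/*
 * Illustrative sketch (not part of the original code) of the reader loop
 * described above, using the RELAY_* ioctls on an already opened buffer
 * file descriptor. Error handling is reduced to the corruption check; the
 * descriptor names (buf_fd, pipe read/write ends, out_fd) are hypothetical.
 */
#if 0
static void example_reader_loop(int buf_fd, int pipe_rd, int pipe_wr,
		int out_fd)
{
	struct pollfd pollfd = { .fd = buf_fd, .events = POLLIN };
	u32 subbuf_size, consumed_old;

	ioctl(buf_fd, RELAY_GET_SUBBUF_SIZE, &subbuf_size);
	while (poll(&pollfd, 1, -1) >= 0 && !(pollfd.revents & POLLHUP)) {
		if (ioctl(buf_fd, RELAY_GET_SUBBUF, &consumed_old) < 0)
			continue;	/* subbuffer not fully committed yet */
		/* splice one subbuffer worth of data to disk/network */
		splice(buf_fd, NULL, pipe_wr, NULL, subbuf_size, 0);
		splice(pipe_rd, NULL, out_fd, NULL, subbuf_size, 0);
		if (ioctl(buf_fd, RELAY_PUT_SUBBUF, &consumed_old) < 0)
			fprintf(stderr, "previous subbuffer was corrupted\n");
	}
}
#endif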
//ust// #include <linux/time.h>
//ust// #include <linux/ltt-tracer.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/init.h>
//ust// #include <linux/rcupdate.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/bitops.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/smp_lock.h>
//ust// #include <linux/debugfs.h>
//ust// #include <linux/stat.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/pipe_fs_i.h>
//ust// #include <linux/splice.h>
//ust// #include <asm/atomic.h>
//ust// #include <asm/local.h>
/* Debug printk, compiled out by default. */
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
/*
 * Last TSC comparison functions. Check if the current TSC overflows
 * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
 * atomically.
 */

#if (BITS_PER_LONG == 32)
static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
}

static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);

	if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)tsc;
}

static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
		return 1;
	else
		return 0;
}
#endif
//ust// static struct file_operations ltt_file_operations;

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan,
		struct rchan_buf *buf,
		unsigned int n_subbufs);

static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan);

static void ltt_force_switch(struct rchan_buf *buf,
		enum force_switch_mode mode);
/*
 * Trace callbacks
 */
static void ltt_buffer_begin_callback(struct rchan_buf *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->lost_size = 0xFFFFFFFF; /* for debugging */
	header->buf_size = buf->chan->subbuf_size;
	ltt_write_trace_header(channel->trace, header);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end_callback(struct rchan_buf *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = channel->buf;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
				buf->chan);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&ltt_buf->events_lost);
	header->subbuf_corrupt = local_read(&ltt_buf->corrupted_subbuffers);
}
static notrace void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
		void *subbuf)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = channel->buf;

	atomic_set(&ltt_buf->wakeup_readers, 1);
}
static struct dentry *ltt_create_buf_file_callback(struct rchan_buf *buf)
{
	struct ltt_channel_struct *ltt_chan;
	int err;
//ust//	struct dentry *dentry;

	ltt_chan = buf->chan->private_data;
	err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan, buf, buf->chan->n_subbufs);
	if (err)
		return ERR_PTR(err);

//ust//	dentry = debugfs_create_file(filename, mode, parent, buf,
//ust//			&ltt_file_operations);
//ust//	if (!dentry)
//ust//		goto error;
//ust//	return dentry;
	return NULL; /* ust */

error:
	ltt_relay_destroy_buffer(ltt_chan);
	return NULL;
}
static int ltt_remove_buf_file_callback(struct rchan_buf *buf)
{
//ust//	struct rchan_buf *buf = dentry->d_inode->i_private;
	struct ltt_channel_struct *ltt_chan = buf->chan->private_data;

//ust//	debugfs_remove(dentry);
	ltt_relay_destroy_buffer(ltt_chan);

	return 0;
}
/*
 * Wake writers :
 *
 * This must be done after the trace is removed from the RCU list so that there
 * are no stalled writers.
 */
//ust// static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
//ust// {
//ust// 	if (waitqueue_active(&ltt_buf->write_wait))
//ust// 		wake_up_interruptible(&ltt_buf->write_wait);
//ust// }
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct rchan_buf *buf,
		unsigned int subbuf_idx,
		long offset)
{
//ust//	struct ltt_channel_struct *ltt_channel =
//ust//		(struct ltt_channel_struct *)buf->chan->private_data;
//ust//	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
//ust//
//ust//	ltt_relay_wake_writers(ltt_buf);
}
/**
 *	ltt_open - open file op for ltt files
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Open implementation. Makes sure only one open instance of a buffer is
 *	done at a given moment.
 */
//ust// static int ltt_open(struct inode *inode, struct file *file)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust//
//ust// 	if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
//ust// 		return -EBUSY;
//ust// 	return ltt_relay_file_operations.open(inode, file);
//ust// }
/**
 *	ltt_release - release file op for ltt files
 *	@inode: opened inode
 *	@file: opened file
 *
 *	Release implementation.
 */
//ust// static int ltt_release(struct inode *inode, struct file *file)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	int ret;
//ust//
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	atomic_long_dec(&ltt_buf->active_readers);
//ust// 	ret = ltt_relay_file_operations.release(inode, file);
//ust// 	WARN_ON(ret);
//ust// 	return ret;
//ust// }
/**
 *	ltt_poll - file op for ltt files
 *	@filp: the file
 *	@wait: poll table
 *
 *	Poll implementation.
 */
//ust// static unsigned int ltt_poll(struct file *filp, poll_table *wait)
//ust// {
//ust// 	unsigned int mask = 0;
//ust// 	struct inode *inode = filp->f_dentry->d_inode;
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust//
//ust// 	if (filp->f_mode & FMODE_READ) {
//ust// 		poll_wait_set_exclusive(wait);
//ust// 		poll_wait(filp, &buf->read_wait, wait);
//ust//
//ust// 		WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 		if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
//ust// 				 buf->chan)
//ust// 		    - SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
//ust// 				   buf->chan)
//ust// 		    == 0) {
//ust// 			if (buf->finalized)
//ust// 				return POLLHUP;
//ust// 			else
//ust// 				return 0;
//ust// 		} else {
//ust// 			struct rchan *rchan =
//ust// 				ltt_channel->trans_channel_data;
//ust// 			if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
//ust// 					buf->chan)
//ust// 			    - SUBBUF_TRUNC(atomic_long_read(
//ust// 						&ltt_buf->consumed),
//ust// 					buf->chan)
//ust// 			    >= rchan->alloc_size)
//ust// 				return POLLPRI | POLLRDBAND;
//ust// 			else
//ust// 				return POLLIN | POLLRDNORM;
//ust// 		}
//ust// 	}
//ust// 	return mask;
//ust// }
int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old)
{
	struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)buf->chan->private_data;
	long consumed_old, consumed_idx, commit_count, write_offset;
	consumed_old = atomic_long_read(&ltt_buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&ltt_buf->commit_count[consumed_idx]);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	smp_rmb();
	write_offset = local_read(&ltt_buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & ltt_channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> ltt_channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	   - SUBBUF_TRUNC(consumed_old, buf->chan))
	   == 0) {
		return -EAGAIN;
	}

	*pconsumed_old = consumed_old;
	return 0;
}
int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&ltt_buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		long index, data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
/**
 *	ltt_ioctl - control on the debugfs file
 *
 *	@inode: the inode
 *	@filp: the file
 *	@cmd: the command
 *	@arg: command arg
 *
 *	This ioctl implements three commands necessary for a minimal
 *	producer/consumer implementation :
 *	RELAY_GET_SUBBUF
 *		Get the next sub buffer that can be read. It never blocks.
 *	RELAY_PUT_SUBBUF
 *		Release the currently read sub-buffer. Parameter is the last
 *		put subbuffer (returned by GET_SUBBUF).
 *	RELAY_GET_N_SUBBUFS
 *		returns the number of sub buffers in the per cpu channel.
 *	RELAY_GET_SUBBUF_SIZE
 *		returns the size of the sub buffers.
 */
//ust// static int ltt_ioctl(struct inode *inode, struct file *filp,
//ust// 		unsigned int cmd, unsigned long arg)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	u32 __user *argp = (u32 __user *)arg;
//ust//
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	switch (cmd) {
//ust// 	case RELAY_GET_SUBBUF:
//ust// 	{
//ust// 		int ret;
//ust// 		long consumed_old;
//ust// 		ret = ltt_do_get_subbuf(buf, ltt_buf, &consumed_old);
//ust// 		if (ret < 0)
//ust// 			return ret;
//ust// 		return put_user((u32)consumed_old, argp);
//ust// 	}
//ust// 	case RELAY_PUT_SUBBUF:
//ust// 	{
//ust// 		int ret;
//ust// 		u32 uconsumed_old;
//ust// 		ret = get_user(uconsumed_old, argp);
//ust// 		if (ret)
//ust// 			return ret; /* will return -EFAULT */
//ust// 		return ltt_do_put_subbuf(buf, ltt_buf, uconsumed_old);
//ust// 	}
//ust// 	case RELAY_GET_N_SUBBUFS:
//ust// 		return put_user((u32)buf->chan->n_subbufs, argp);
//ust// 	case RELAY_GET_SUBBUF_SIZE:
//ust// 		return put_user((u32)buf->chan->subbuf_size, argp);
//ust// 	default:
//ust// 		break;
//ust// 	}
//ust// 	return -ENOIOCTLCMD;
//ust// }
//ust// #ifdef CONFIG_COMPAT
//ust// static long ltt_compat_ioctl(struct file *file, unsigned int cmd,
//ust// 		unsigned long arg)
//ust// {
//ust// 	long ret = -ENOIOCTLCMD;
//ust//
//ust// 	lock_kernel();
//ust// 	ret = ltt_ioctl(file->f_dentry->d_inode, file, cmd, arg);
//ust// 	unlock_kernel();
//ust//
//ust// 	return ret;
//ust// }
//ust// #endif
//ust// static void ltt_relay_pipe_buf_release(struct pipe_inode_info *pipe,
//ust// 				   struct pipe_buffer *pbuf)
//ust// {
//ust// }
//ust//
//ust// static struct pipe_buf_operations ltt_relay_pipe_buf_ops = {
//ust// 	.can_merge = 0,
//ust// 	.map = generic_pipe_buf_map,
//ust// 	.unmap = generic_pipe_buf_unmap,
//ust// 	.confirm = generic_pipe_buf_confirm,
//ust// 	.release = ltt_relay_pipe_buf_release,
//ust// 	.steal = generic_pipe_buf_steal,
//ust// 	.get = generic_pipe_buf_get,
//ust// };
//ust//
//ust// static void ltt_relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
//ust// {
//ust// }
/*
 *	subbuf_splice_actor - splice up to one subbuf's worth of data
 */
//ust// static int subbuf_splice_actor(struct file *in,
//ust// 			       loff_t *ppos,
//ust// 			       struct pipe_inode_info *pipe,
//ust// 			       size_t len,
//ust// 			       unsigned int flags)
//ust// {
//ust// 	struct rchan_buf *buf = in->private_data;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	unsigned int poff, subbuf_pages, nr_pages;
//ust// 	struct page *pages[PIPE_BUFFERS];
//ust// 	struct partial_page partial[PIPE_BUFFERS];
//ust// 	struct splice_pipe_desc spd = {
//ust// 		.pages = pages,
//ust// 		.nr_pages = 0,
//ust// 		.partial = partial,
//ust// 		.flags = flags,
//ust// 		.ops = &ltt_relay_pipe_buf_ops,
//ust// 		.spd_release = ltt_relay_page_release,
//ust// 	};
//ust// 	long consumed_old, consumed_idx, roffset;
//ust// 	unsigned long bytes_avail;
//ust//
//ust// 	/*
//ust// 	 * Check that a GET_SUBBUF ioctl has been done before.
//ust// 	 */
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	consumed_old = atomic_long_read(&ltt_buf->consumed);
//ust// 	consumed_old += *ppos;
//ust// 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
//ust//
//ust// 	/*
//ust// 	 * Adjust read len, if longer than what is available
//ust// 	 */
//ust// 	bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
//ust// 		    - consumed_old;
//ust// 	WARN_ON(bytes_avail > buf->chan->alloc_size);
//ust// 	len = min_t(size_t, len, bytes_avail);
//ust// 	subbuf_pages = bytes_avail >> PAGE_SHIFT;
//ust// 	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
//ust// 	roffset = consumed_old & PAGE_MASK;
//ust// 	poff = consumed_old & ~PAGE_MASK;
//ust// 	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
//ust// 		   len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
//ust//
//ust// 	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
//ust// 		unsigned int this_len;
//ust// 		struct buf_page *page;
//ust//
//ust// 		if (!len)
//ust// 			break;
//ust// 		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
//ust// 			   len, roffset);
//ust//
//ust// 		this_len = PAGE_SIZE - poff;
//ust// 		page = ltt_relay_read_get_page(buf, roffset);
//ust// 		spd.pages[spd.nr_pages] = page->page;
//ust// 		spd.partial[spd.nr_pages].offset = poff;
//ust// 		spd.partial[spd.nr_pages].len = this_len;
//ust//
//ust// 		poff = 0;
//ust// 		roffset += PAGE_SIZE;
//ust// 		len -= this_len;
//ust// 	}
//ust//
//ust// 	if (!spd.nr_pages)
//ust// 		return 0;
//ust//
//ust// 	return splice_to_pipe(pipe, &spd);
//ust// }
//ust// static ssize_t ltt_relay_file_splice_read(struct file *in,
//ust// 				      loff_t *ppos,
//ust// 				      struct pipe_inode_info *pipe,
//ust// 				      size_t len,
//ust// 				      unsigned int flags)
//ust// {
//ust// 	ssize_t spliced;
//ust// 	int ret;
//ust//
//ust// 	ret = 0;
//ust// 	spliced = 0;
//ust//
//ust// 	printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
//ust// 		   len, (ssize_t)*ppos);
//ust// 	while (len && !spliced) {
//ust// 		ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
//ust// 		printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
//ust// 		if (ret < 0)
//ust// 			break;
//ust// 		else if (!ret) {
//ust// 			if (flags & SPLICE_F_NONBLOCK)
//ust// 				ret = -EAGAIN;
//ust// 			break;
//ust// 		}
//ust//
//ust// 		*ppos += ret;
//ust// 		if (ret > len)
//ust// 			len = 0;
//ust// 		else
//ust// 			len -= ret;
//ust// 		spliced += ret;
//ust// 	}
//ust//
//ust// 	if (spliced)
//ust// 		return spliced;
//ust//
//ust// 	return ret;
//ust// }
static void ltt_relay_print_subbuffer_errors(
		struct ltt_channel_struct *ltt_chan,
		long cons_off)
{
	struct rchan *rchan = ltt_chan->trans_channel_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	long cons_idx, commit_count, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, rchan);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	printk(KERN_WARNING
	       "LTT : unread channel %s offset is %ld "
	       "and cons_off : %ld\n",
	       ltt_chan->channel_name, write_offset, cons_off);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - rchan->subbuf_size) & ltt_chan->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, rchan) >> ltt_chan->n_subbufs_order)
	    != 0)
		printk(KERN_ALERT
		       "LTT : %s : subbuffer %lu has non filled "
		       "commit count %lu.\n",
		       ltt_chan->channel_name, cons_idx, commit_count);
	printk(KERN_ALERT "LTT : %s : commit count : %lu, subbuf size %zd\n",
	       ltt_chan->channel_name, commit_count,
	       rchan->subbuf_size);
}
static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan)
{
	struct rchan *rchan = ltt_chan->trans_channel_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	long cons_off;

	for (cons_off = atomic_long_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
				      rchan)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, rchan))
		ltt_relay_print_subbuffer_errors(ltt_chan, cons_off);
}
static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;

	if (local_read(&ltt_buf->events_lost))
		printk(KERN_ALERT
		       "LTT : %s : %ld events lost "
		       "in %s channel\n",
		       ltt_chan->channel_name,
		       local_read(&ltt_buf->events_lost),
		       ltt_chan->channel_name);
	if (local_read(&ltt_buf->corrupted_subbuffers))
		printk(KERN_ALERT
		       "LTT : %s : %ld corrupted subbuffers "
		       "in %s channel\n",
		       ltt_chan->channel_name,
		       local_read(&ltt_buf->corrupted_subbuffers),
		       ltt_chan->channel_name);

	ltt_relay_print_errors(trace, ltt_chan);
}
static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
{
//ust//	debugfs_remove(trace->dentry.trace_root);
}

static void ltt_relay_release_channel(struct kref *kref)
{
	struct ltt_channel_struct *ltt_chan = container_of(kref,
			struct ltt_channel_struct, kref);
	free(ltt_chan->buf);
}
/*
 * Create ltt buffer.
 */
//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
//ust// 		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust// 		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_chan->buf, cpu);
//ust// 	unsigned int j;
//ust//
//ust// 	ltt_buf->commit_count =
//ust// 		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust// 			GFP_KERNEL, cpu_to_node(cpu));
//ust// 	if (!ltt_buf->commit_count)
//ust// 		return -ENOMEM;
//ust// 	kref_get(&trace->kref);
//ust// 	kref_get(&trace->ltt_transport_kref);
//ust// 	kref_get(&ltt_chan->kref);
//ust// 	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust// 	atomic_long_set(&ltt_buf->consumed, 0);
//ust// 	atomic_long_set(&ltt_buf->active_readers, 0);
//ust// 	for (j = 0; j < n_subbufs; j++)
//ust// 		local_set(&ltt_buf->commit_count[j], 0);
//ust// 	init_waitqueue_head(&ltt_buf->write_wait);
//ust// 	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust// 	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust// 	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust// 	/* atomic_add made on local variable on data that belongs to
//ust// 	 * various CPUs : ok because tracing not started (for this cpu). */
//ust// 	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust// 	local_set(&ltt_buf->events_lost, 0);
//ust// 	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust// 	return 0;
//ust// }
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
		unsigned int n_subbufs)
{
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	unsigned int j;

	ltt_buf->commit_count =
		zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
	if (!ltt_buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
	atomic_long_set(&ltt_buf->consumed, 0);
	atomic_long_set(&ltt_buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++)
		local_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
	atomic_set(&ltt_buf->wakeup_readers, 0);
	spin_lock_init(&ltt_buf->full_lock);

	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);

	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);

	local_set(&ltt_buf->events_lost, 0);
	local_set(&ltt_buf->corrupted_subbuffers, 0);

	return 0;
}
static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		 ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}
/*
 * Create channel.
 */
static int ltt_relay_create_channel(const char *trace_name,
		struct ltt_trace_struct *trace, struct dentry *dir,
		const char *channel_name, struct ltt_channel_struct *ltt_chan,
		unsigned int subbuf_size, unsigned int n_subbufs,
		int overwrite)
{
	char *tmpname;
	unsigned int tmpname_len;
	int err = 0;

	tmpname = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!tmpname)
		return EPERM;
	if (overwrite) {
		strncpy(tmpname, LTT_FLIGHT_PREFIX, PATH_MAX-1);
		strncat(tmpname, channel_name,
			PATH_MAX-1-sizeof(LTT_FLIGHT_PREFIX));
	} else {
		strncpy(tmpname, channel_name, PATH_MAX-1);
	}
	strncat(tmpname, "_", PATH_MAX-1-strlen(tmpname));

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->buffer_begin = ltt_buffer_begin_callback;
	ltt_chan->buffer_end = ltt_buffer_end_callback;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
	if (!ltt_chan->buf)
		goto alloc_error;
	ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
			dir,
			subbuf_size,
			n_subbufs,
			ltt_chan);
	tmpname_len = strlen(tmpname);
	if (tmpname_len > 0) {
		/* Remove final _ for pretty printing */
		tmpname[tmpname_len-1] = '\0';
	}
	if (ltt_chan->trans_channel_data == NULL) {
		printk(KERN_ERR "LTT : Can't open %s channel for trace %s\n",
				tmpname, trace_name);
		goto relay_open_error;
	}

	err = 0;
	goto end;

relay_open_error:
//ust//	percpu_free(ltt_chan->buf);
alloc_error:
	err = EPERM;
end:
	kfree(tmpname);
	return err;
}
static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
{
//ust//	new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
//ust//			get_ltt_root());
//ust//	if (new_trace->dentry.trace_root == NULL) {
//ust//		printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
//ust//				new_trace->trace_name);
//ust//		return EEXIST;
//ust//	}
//ust//
//ust//	new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
//ust//	new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;

	return 0;
}
/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct rchan_buf *buf)
{
	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);
}
static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}
static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
{
	struct rchan *rchan = ltt_channel->trans_channel_data;

	if (rchan->buf) {
		struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
		ltt_relay_buffer_flush(rchan->buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
	}
}

static void ltt_relay_finish_channel(struct ltt_channel_struct *ltt_channel)
{
//ust//	unsigned int i;

//ust//	for_each_possible_cpu(i)
	ltt_relay_finish_buffer(ltt_channel);
}
static void ltt_relay_remove_channel(struct ltt_channel_struct *channel)
{
	struct rchan *rchan = channel->trans_channel_data;

	ltt_relay_close(rchan);
	kref_put(&channel->kref, ltt_relay_release_channel);
}
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	long commit_count, reserve_commit_diff;
	size_t before_hdr_pad, size;
};
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_reserve(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	offsets->begin = local_read(&ltt_buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(ltt_buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ltt_get_header_size(ltt_channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (offsets->begin_switch) {
		long subbuf_index;

		if (offsets->end_switch_old)
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		offsets->reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> ltt_channel->n_subbufs_order)
			- (local_read(&ltt_buf->commit_count[subbuf_index])
				& ltt_channel->commit_count_mask);
		if (offsets->reserve_commit_diff == 0) {
			/* Next buffer not corrupted. */
			if (!ltt_channel->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(atomic_long_read(
							&ltt_buf->consumed),
						buf->chan))
				>= rchan->alloc_size) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				local_inc(&ltt_buf->events_lost);
				return -1;
			}
			/*
			 * next buffer not corrupted, we are either in
			 * overwrite mode or the buffer is not full.
			 * It's safe to write in this new subbuffer.
			 */
		} else {
			/*
			 * Next subbuffer corrupted. Force pushing reader even
			 * in normal mode. It's safe to write in this new
			 * subbuffer.
			 */
		}
		offsets->size = ltt_get_header_size(ltt_channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&ltt_buf->events_lost);
			return -1;
		}
		/*
		 * We just made a successful buffer switch and the event
		 * fits in the new subbuffer. Let's write.
		 */
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_switch(
		enum force_switch_mode mode,
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;

	offsets->begin = local_read(&ltt_buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	offsets->reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> ltt_channel->n_subbufs_order)
		- (local_read(&ltt_buf->commit_count[subbuf_index])
			& ltt_channel->commit_count_mask);
	if (offsets->reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !ltt_channel->overwrite
		    && offsets->begin - atomic_long_read(&ltt_buf->consumed)
		       >= rchan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
static inline void ltt_reserve_push_reader(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf,
		struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets)
{
	long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&ltt_buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 * If the buffer is not in overwrite mode, pushing the reader
		 * only happens if a sub-buffer is corrupted.
		 */
		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
		   - SUBBUF_TRUNC(consumed_old, buf->chan))
		   >= rchan->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
			consumed_new) != consumed_old);

	if (consumed_old != consumed_new) {
		/*
		 * Reader pushed : we are the winner of the push, we can
		 * therefore reequilibrate reserve and commit. Atomic increment
		 * of the commit count permits other writers to play around
		 * with this variable before us. We keep track of
		 * corrupted_subbuffers even in overwrite mode :
		 * we never want to write over a non completely committed
		 * sub-buffer : possible causes : the buffer size is too low
		 * compared to the unordered data input, or there is a writer
		 * that died between the reserve and the commit.
		 */
		if (offsets->reserve_commit_diff) {
			/*
			 * We have to alter the sub-buffer commit count.
			 * We do not deliver the previous subbuffer, given it
			 * was either corrupted or not consumed (overwrite
			 * mode).
			 */
			local_add(offsets->reserve_commit_diff,
				  &ltt_buf->commit_count[
					SUBBUF_INDEX(offsets->begin,
						     buf->chan)]);
			if (!ltt_channel->overwrite
			    || offsets->reserve_commit_diff
			       != rchan->subbuf_size) {
				/*
				 * The reserve commit diff was not subbuf_size :
				 * it means the subbuffer was partly written to
				 * and is therefore corrupted. If it is multiple
				 * of subbuffer size and we are in flight
				 * recorder mode, we are skipping over a whole
				 * subbuffer.
				 */
				local_inc(&ltt_buf->corrupted_subbuffers);
			}
		}
	}
}
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static inline void ltt_reserve_switch_old_subbuf(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, rchan);

	ltt_channel->buffer_end(buf, *tsc, offsets->old, oldidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(rchan->subbuf_size
				 - (SUBBUF_OFFSET(offsets->old - 1, rchan)
				 + 1),
				 &ltt_buf->commit_count[oldidx]);
	if ((BUFFER_TRUNC(offsets->old - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, oldidx, NULL);
}
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static inline void ltt_reserve_switch_new_subbuf(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, rchan);

	ltt_channel->buffer_begin(buf, *tsc, beginidx);
	/* Must write buffer begin before incrementing commit count */
	smp_wmb();
	offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
			&ltt_buf->commit_count[beginidx]);
	/* Check if the written buffer has to be delivered */
	if ((BUFFER_TRUNC(offsets->begin, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, beginidx, NULL);
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 */
static inline void ltt_reserve_end_switch_current(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, rchan);

	ltt_channel->buffer_end(buf, *tsc, offsets->end, endidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(rchan->subbuf_size
				 - (SUBBUF_OFFSET(offsets->end - 1, rchan)
				 + 1),
				 &ltt_buf->commit_count[endidx]);
	if ((BUFFER_TRUNC(offsets->end - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, NULL);
}

/**
 * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset: pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Return: -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_channel, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align)
{
	struct rchan *rchan = ltt_channel->trans_channel_data;
	struct rchan_buf *buf = *transport_data = rchan->buf;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct ltt_reserve_switch_offsets offsets;

	offsets.reserve_commit_diff = 0;

	/*
	 * Perform retryable operations.
	 */
	if (ltt_nesting > 4) {
		local_inc(&ltt_buf->events_lost);
		return -EPERM;
	}
	do {
		if (ltt_relay_try_reserve(ltt_channel, ltt_buf,
				rchan, buf, &offsets, data_size, tsc, rflags,
				largest_align))
			return -ENOSPC;
	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(ltt_buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
			&offsets, tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (offsets.begin_switch)
		ltt_reserve_switch_new_subbuf(ltt_channel, ltt_buf, rchan,
			buf, &offsets, tsc);

	if (offsets.end_switch_current)
		ltt_reserve_end_switch_current(ltt_channel, ltt_buf, rchan,
			buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
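
/*
 * Editor's sketch of the reservation loop's core, reduced to a stand-alone
 * pattern (hypothetical names; GCC __atomic builtins stand in for the local_t
 * operations): compute a tentative [old, end) window, then publish it with a
 * compare-and-swap, retrying if a concurrent reserver moved the write offset
 * first.
 */
//	long old, end;
//	do {
//		old = __atomic_load_n(&write_offset, __ATOMIC_RELAXED);
//		end = old + slot_size;	/* + alignment and switch handling */
//	} while (!__atomic_compare_exchange_n(&write_offset, &old, end,
//			0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));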

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 *
 * Note, however, that as a local_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
static notrace void ltt_force_switch(struct rchan_buf *buf,
		enum force_switch_mode mode)
{
	struct ltt_channel_struct *ltt_channel =
			(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct rchan *rchan = ltt_channel->trans_channel_data;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.reserve_commit_diff = 0;

	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch(mode, ltt_channel, ltt_buf,
				rchan, buf, &offsets, &tsc))
			return;
	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(ltt_buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan,
				buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
			&offsets, &tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(ltt_channel,
			ltt_buf, rchan, buf, &offsets, &tsc);
}
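
/*
 * Editor's note: an ACTIVE flush both pushes the reader and opens a fresh
 * sub-buffer, so it must run on the CPU owning the buffer (local_cmpxchg is
 * atomic only with respect to that CPU).  A passive flush merely closes the
 * current sub-buffer so a reader can consume it; this is the variant a
 * trace-stop or periodic-drain path would typically request.
 */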

/*
 * Used for flight recording. Must be called after relay_commit.
 * This function decrements the subbuffer's lost_size each time the commit
 * count reaches back the reserve offset (modulo subbuffer size). It is useful
 * for crash dump extraction.
 * We use slot_size - 1 to make sure we deal correctly with the case where we
 * fill the subbuffer completely (so the subbuf index stays in the previous
 * subbuffer).
 */
#ifdef CONFIG_LTT_VMCORE
static inline void ltt_write_commit_counter(struct rchan_buf *buf,
		long buf_offset, size_t slot_size)
{
	struct ltt_channel_struct *ltt_channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf =
		percpu_ptr(ltt_channel->buf, buf->cpu);
	struct ltt_subbuffer_header *header;
	long offset, subbuf_idx, commit_count;
	uint32_t lost_old, lost_new;

	subbuf_idx = SUBBUF_INDEX(buf_offset - 1, buf->chan);
	offset = buf_offset + slot_size;
	header = (struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	for (;;) {
		lost_old = header->lost_size;
		commit_count =
			local_read(&ltt_buf->commit_count[subbuf_idx]);
		/* SUBBUF_OFFSET includes commit_count_mask */
		if (!SUBBUF_OFFSET(offset - commit_count, buf->chan)) {
			lost_new = (uint32_t)buf->chan->subbuf_size
				   - SUBBUF_OFFSET(commit_count, buf->chan);
			lost_old = cmpxchg_local(&header->lost_size, lost_old,
						lost_new);
			if (lost_old <= lost_new)
				break;
		} else {
			break;
		}
	}
}
#else
static inline void ltt_write_commit_counter(struct rchan_buf *buf,
		long buf_offset, size_t slot_size)
{
}
#endif
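
/*
 * Editor's worked example for the update above (hypothetical sizes): with
 * subbuf_size = 4096 and a commit count sitting at relative offset 3000
 * within the sub-buffer, once the commit count catches up with the reserve
 * offset (the SUBBUF_OFFSET test), lost_size is lowered to 4096 - 3000 =
 * 1096: the tail bytes a post-crash extractor must discard because no commit
 * covers them.  The cmpxchg_local() retry keeps the stored value from
 * increasing under concurrent committers.
 */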

/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @ltt_channel : channel structure
 * @transport_data: transport-specific data
 * @buf_offset : offset following the event header.
 * @slot_size : size of the reserved slot.
 */
static notrace void ltt_relay_commit_slot(
		struct ltt_channel_struct *ltt_channel,
		void **transport_data, long buf_offset, size_t slot_size)
{
	struct rchan_buf *buf = *transport_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct rchan *rchan = buf->chan;
	long offset_end = buf_offset;
	long endidx = SUBBUF_INDEX(offset_end - 1, rchan);
	long commit_count;

	/* Must write slot data before incrementing commit count */
	smp_wmb();
	commit_count = local_add_return(slot_size,
			&ltt_buf->commit_count[endidx]);
	/* Check if all commits have been done */
	if ((BUFFER_TRUNC(offset_end - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((commit_count - rchan->subbuf_size)
				& ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, NULL);
	/*
	 * Update lost_size for each commit. It's needed only for extracting
	 * ltt buffers from vmcore, after crash.
	 */
	ltt_write_commit_counter(buf, buf_offset, slot_size);
}
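
/*
 * Editor's sketch of a probe driving the two halves above (hypothetical
 * names and sizes; the exact buf_offset convention follows the comments
 * above, and error handling is elided):
 */
//	size_t slot_size;
//	long buf_offset;
//	u64 tsc;
//	void *transport_data;
//	unsigned int rflags = 0;
//
//	if (!ltt_relay_reserve_slot(trace, ltt_channel, &transport_data,
//			ev_size, &slot_size, &buf_offset, &tsc, &rflags,
//			sizeof(long))) {
//		/* serialize event header + payload into the slot here */
//		ltt_relay_commit_slot(ltt_channel, &transport_data,
//				buf_offset, slot_size);
//	}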

/*
 * This is called with preemption disabled when user space has requested
 * blocking mode. If one of the active traces has free space below a
 * specific threshold value, we reenable preemption and block.
 */
static int ltt_relay_user_blocking(struct ltt_trace_struct *trace,
		unsigned int chan_index, size_t data_size,
		struct user_dbg_data *dbg)
{
//ust//	struct rchan *rchan;
//ust//	struct ltt_channel_buf_struct *ltt_buf;
//ust//	struct ltt_channel_struct *channel;
//ust//	struct rchan_buf *relay_buf;
//ust//	int cpu;
//ust//	DECLARE_WAITQUEUE(wait, current);

//ust//	channel = &trace->channels[chan_index];
//ust//	rchan = channel->trans_channel_data;
//ust//	cpu = smp_processor_id();
//ust//	relay_buf = rchan->buf[cpu];
//ust//	ltt_buf = percpu_ptr(channel->buf, cpu);

//ust//	/*
//ust//	 * Check if data is too big for the channel : do not
//ust//	 * block for it.
//ust//	 */
//ust//	if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
//ust//		return 0;

//ust//	/*
//ust//	 * If free space too low, we block. We restart from the
//ust//	 * beginning after we resume (cpu id may have changed
//ust//	 * while preemption is active).
//ust//	 */
//ust//	spin_lock(&ltt_buf->full_lock);
//ust//	if (!channel->overwrite) {
//ust//		dbg->write = local_read(&ltt_buf->offset);
//ust//		dbg->read = atomic_long_read(&ltt_buf->consumed);
//ust//		dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
//ust//				  - SUBBUF_TRUNC(dbg->read,
//ust//						 relay_buf->chan);
//ust//		if (dbg->avail_size > rchan->alloc_size) {
//ust//			__set_current_state(TASK_INTERRUPTIBLE);
//ust//			add_wait_queue(&ltt_buf->write_wait, &wait);
//ust//			spin_unlock(&ltt_buf->full_lock);
//ust//			preempt_enable();
//ust//			schedule();
//ust//			__set_current_state(TASK_RUNNING);
//ust//			remove_wait_queue(&ltt_buf->write_wait, &wait);
//ust//			if (signal_pending(current))
//ust//				return -ERESTARTSYS;
//ust//			preempt_disable();
//ust//			return 1;
//ust//		}
//ust//	}
//ust//	spin_unlock(&ltt_buf->full_lock);
	return 0;
}

static void ltt_relay_print_user_errors(struct ltt_trace_struct *trace,
		unsigned int chan_index, size_t data_size,
		struct user_dbg_data *dbg)
{
	struct rchan *rchan;
	struct ltt_channel_buf_struct *ltt_buf;
	struct ltt_channel_struct *channel;
	struct rchan_buf *relay_buf;

	channel = &trace->channels[chan_index];
	rchan = channel->trans_channel_data;
	relay_buf = rchan->buf;
	ltt_buf = channel->buf;

	printk(KERN_ERR "Error in LTT usertrace : "
			"buffer full : event lost in blocking "
			"mode. Increase LTT_RESERVE_CRITICAL.\n");
	printk(KERN_ERR "LTT nesting level is %u.\n", ltt_nesting);
	printk(KERN_ERR "LTT avail size %lu.\n",
			dbg->avail_size);
	printk(KERN_ERR "avail write : %lu, read : %lu\n",
			dbg->write, dbg->read);

	dbg->write = local_read(&ltt_buf->offset);
	dbg->read = atomic_long_read(&ltt_buf->consumed);

	printk(KERN_ERR "LTT cur size %lu.\n",
			dbg->write + LTT_RESERVE_CRITICAL + data_size
			- SUBBUF_TRUNC(dbg->read, relay_buf->chan));
	printk(KERN_ERR "cur write : %lu, read : %lu\n",
			dbg->write, dbg->read);
}

//ust// static struct ltt_transport ltt_relay_transport = {
//ust//	.name = "relay",
//ust//	.owner = THIS_MODULE,
//ust//	.ops = {
//ust//		.create_dirs = ltt_relay_create_dirs,
//ust//		.remove_dirs = ltt_relay_remove_dirs,
//ust//		.create_channel = ltt_relay_create_channel,
//ust//		.finish_channel = ltt_relay_finish_channel,
//ust//		.remove_channel = ltt_relay_remove_channel,
//ust//		.wakeup_channel = ltt_relay_async_wakeup_chan,
//ust//		.commit_slot = ltt_relay_commit_slot,
//ust//		.reserve_slot = ltt_relay_reserve_slot,
//ust//		.user_blocking = ltt_relay_user_blocking,
//ust//		.user_errors = ltt_relay_print_user_errors,
//ust//	},
//ust// };

static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
//ust//	.owner = THIS_MODULE,
	.ops = {
		.create_dirs = ltt_relay_create_dirs,
		.remove_dirs = ltt_relay_remove_dirs,
		.create_channel = ltt_relay_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
		.commit_slot = ltt_relay_commit_slot,
		.reserve_slot = ltt_relay_reserve_slot,
		.user_blocking = ltt_relay_user_blocking,
		.user_errors = ltt_relay_print_user_errors,
	},
};

//ust// static int __init ltt_relay_init(void)
//ust// {
//ust//	printk(KERN_INFO "LTT : ltt-relay init\n");
//ust//
//ust//	ltt_file_operations = ltt_relay_file_operations;
//ust//	ltt_file_operations.owner = THIS_MODULE;
//ust//	ltt_file_operations.open = ltt_open;
//ust//	ltt_file_operations.release = ltt_release;
//ust//	ltt_file_operations.poll = ltt_poll;
//ust//	ltt_file_operations.splice_read = ltt_relay_file_splice_read;
//ust//	ltt_file_operations.ioctl = ltt_ioctl;
//ust//#ifdef CONFIG_COMPAT
//ust//	ltt_file_operations.compat_ioctl = ltt_compat_ioctl;
//ust//#endif
//ust//
//ust//	ltt_transport_register(&ltt_relay_transport);
//ust//
//ust//	return 0;
//ust// }

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if (!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __exit ltt_relay_exit(void)
{
//ust//	printk(KERN_INFO "LTT : ltt-relay exit\n");

	ltt_transport_unregister(&ust_relay_transport);
}

//ust// module_init(ltt_relay_init);
//ust// module_exit(ltt_relay_exit);

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Lockless Relay");