/*
 * Public API and common code for kernel->userspace relay file support.
 *
 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Moved to kernel/relay.c by Paul Mundt, 2006.
 * November 2006 - CPU hotplug support by Mathieu Desnoyers
 *	(mathieu.desnoyers@polymtl.ca)
 *
 * This file is released under the GPL.
 */
//ust// #include <linux/errno.h>
//ust// #include <linux/stddef.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/vmalloc.h>
//ust// #include <linux/mm.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/splice.h>
//ust// #include <linux/bitops.h>

#include "kernelcompat.h"
#include "tracercore.h"
/* list of open channels, for cpu hotplug */
static DEFINE_MUTEX(relay_channels_mutex);
static LIST_HEAD(relay_channels);
/**
 * relay_alloc_buf - allocate a channel buffer
 * @buf: the buffer struct
 * @size: total size of the buffer
 */
//ust// static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
//ust// {
//ust// 	unsigned int i, n_pages;
//ust// 	struct buf_page *buf_page, *n;
//ust//
//ust// 	*size = PAGE_ALIGN(*size);
//ust// 	n_pages = *size >> PAGE_SHIFT;
//ust//
//ust// 	INIT_LIST_HEAD(&buf->pages);
//ust//
//ust// 	for (i = 0; i < n_pages; i++) {
//ust// 		buf_page = kmalloc_node(sizeof(*buf_page), GFP_KERNEL,
//ust// 			cpu_to_node(buf->cpu));
//ust// 		if (unlikely(!buf_page))
//ust// 			goto depopulate;
//ust// 		buf_page->page = alloc_pages_node(cpu_to_node(buf->cpu),
//ust// 			GFP_KERNEL | __GFP_ZERO, 0);
//ust// 		if (unlikely(!buf_page->page)) {
//ust// 			kfree(buf_page);
//ust// 			goto depopulate;
//ust// 		}
//ust// 		list_add_tail(&buf_page->list, &buf->pages);
//ust// 		buf_page->offset = (size_t)i << PAGE_SHIFT;
//ust// 		buf_page->buf = buf;
//ust// 		set_page_private(buf_page->page, (unsigned long)buf_page);
//ust// 		if (i == 0) {
//ust// 			buf->wpage = buf_page;
//ust// 			buf->hpage[0] = buf_page;
//ust// 			buf->hpage[1] = buf_page;
//ust// 			buf->rpage = buf_page;
//ust// 		}
//ust// 	}
//ust// 	buf->page_count = n_pages;
//ust// 	return 0;
//ust//
//ust// depopulate:
//ust// 	list_for_each_entry_safe(buf_page, n, &buf->pages, list) {
//ust// 		list_del_init(&buf_page->list);
//ust// 		__free_page(buf_page->page);
//ust// 		kfree(buf_page);
//ust// 	}
//ust// 	return -ENOMEM;
//ust// }
static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
	struct buf_page *buf_page, *n;
	void *result;

	*size = PAGE_ALIGN(*size);

	/* Maybe do read-ahead */
	result = mmap(NULL, *size, PROT_READ | PROT_WRITE,
		      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (result == MAP_FAILED) {
		PERROR("mmap");
		return -1;
	}

	buf->buf_data = result;
	buf->buf_size = *size;

	return 0;
}
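
/*
 * The userspace port above backs a whole channel buffer with a single
 * anonymous mmap() region instead of the kernel's per-page list. A
 * self-contained sketch of the same allocate/release pairing follows
 * (illustrative only, never compiled here; the function names are
 * hypothetical, not part of this file) :
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>

static void *example_alloc_buf(size_t size)
{
	/* Anonymous private mappings are zero-filled, like __GFP_ZERO. */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	return ptr;
}

static void example_free_buf(void *ptr, size_t size)
{
	if (munmap(ptr, size) < 0)
		perror("munmap");
}
#endif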
/**
 * relay_create_buf - allocate and initialize a channel buffer
 * @chan: the relay channel
 *
 * Returns channel buffer if successful, %NULL otherwise.
 */
static struct rchan_buf *relay_create_buf(struct rchan *chan)
{
	int ret;
	struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	ret = relay_alloc_buf(buf, &chan->alloc_size);
	if (ret)
		goto free_buf;

	buf->chan = chan;
	kref_get(&buf->chan->kref);
	return buf;

free_buf:
	kfree(buf);
	return NULL;
}
/**
 * relay_destroy_channel - free the channel struct
 * @kref: target kernel reference that contains the relay channel
 *
 * Should only be called from kref_put().
 */
static void relay_destroy_channel(struct kref *kref)
{
	struct rchan *chan = container_of(kref, struct rchan, kref);
	kfree(chan);
}
/**
 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
 * @buf: the buffer struct
 */
static void relay_destroy_buf(struct rchan_buf *buf)
{
	struct rchan *chan = buf->chan;
	struct buf_page *buf_page, *n;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if (result < 0)
		PERROR("munmap");

//ust//	chan->buf[buf->cpu] = NULL;
	kfree(buf);
	kref_put(&chan->kref, relay_destroy_channel);
}
/**
 * relay_remove_buf - remove a channel buffer
 * @kref: target kernel reference that contains the relay buffer
 *
 * Removes the file from the filesystem, which also frees the
 * rchan_buf_struct and the channel buffer. Should only be called from
 * kref_put().
 */
static void relay_remove_buf(struct kref *kref)
{
	struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
	buf->chan->cb->remove_buf_file(buf);
	relay_destroy_buf(buf);
}
/**
 * High-level relay kernel API and associated functions.
 */

/*
 * rchan_callback implementations defining default channel behavior. Used
 * in place of corresponding NULL values in client callback struct.
 */
/*
 * create_buf_file_create() default callback. Does nothing.
 */
static struct dentry *create_buf_file_default_callback(const char *filename,
						       struct dentry *parent,
						       int mode,
						       struct rchan_buf *buf)
{
	return NULL;
}
/*
 * remove_buf_file() default callback. Does nothing.
 */
static int remove_buf_file_default_callback(struct dentry *dentry)
{
	return -EINVAL;
}
/**
 * wakeup_readers - wake up readers waiting on a channel
 * @data: contains the channel buffer
 *
 * This is the timer function used to defer reader waking.
 */
//ust// static void wakeup_readers(unsigned long data)
//ust// {
//ust// 	struct rchan_buf *buf = (struct rchan_buf *)data;
//ust// 	wake_up_interruptible(&buf->read_wait);
//ust// }
/**
 * __relay_reset - reset a channel buffer
 * @buf: the channel buffer
 * @init: 1 if this is a first-time initialization
 *
 * See relay_reset() for description of effect.
 */
static void __relay_reset(struct rchan_buf *buf, unsigned int init)
{
	if (init) {
//ust//		init_waitqueue_head(&buf->read_wait);
		kref_init(&buf->kref);
//ust//		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
	}
//ust//	else
//ust//		del_timer_sync(&buf->timer);
}
/*
 * relay_open_buf - create a new relay channel buffer
 *
 * used by relay_open() and CPU hotplug.
 */
static struct rchan_buf *relay_open_buf(struct rchan *chan)
{
	struct rchan_buf *buf = NULL;
	struct dentry *dentry;
//ust//	char *tmpname;

//ust//	tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
//ust//	if (!tmpname)
//ust//		goto end;
//ust//	snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);

	buf = relay_create_buf(chan);
	if (!buf)
		goto free_name;

	__relay_reset(buf, 1);

	/* Create file in fs */
//ust//	dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
//ust//			buf);
//ust//	if (!dentry)
//ust//		goto free_buf;
//ust//	buf->dentry = dentry;

	goto end;

free_buf:
	relay_destroy_buf(buf);
	buf = NULL;
free_name:
//ust//	kfree(tmpname);
end:
	return buf;
}
/**
 * relay_close_buf - close a channel buffer
 * @buf: channel buffer
 *
 * Marks the buffer finalized and restores the default callbacks.
 * The channel buffer and channel buffer data structure are then freed
 * automatically when the last reference is given up.
 */
static void relay_close_buf(struct rchan_buf *buf)
{
//ust//	del_timer_sync(&buf->timer);
	kref_put(&buf->kref, relay_remove_buf);
}
//ust// static void setup_callbacks(struct rchan *chan,
//ust// 		struct rchan_callbacks *cb)
//ust// {
//ust// 	if (!cb) {
//ust// 		chan->cb = &default_channel_callbacks;
//ust// 		return;
//ust// 	}
//ust//
//ust// 	if (!cb->create_buf_file)
//ust// 		cb->create_buf_file = create_buf_file_default_callback;
//ust// 	if (!cb->remove_buf_file)
//ust// 		cb->remove_buf_file = remove_buf_file_default_callback;
//ust// 	chan->cb = cb;
//ust// }
/**
 * relay_hotcpu_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
//ust// static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
//ust// 		unsigned long action,
//ust// 		void *hcpu)
//ust// {
//ust// 	unsigned int hotcpu = (unsigned long)hcpu;
//ust// 	struct rchan *chan;
//ust//
//ust// 	switch (action) {
//ust// 	case CPU_UP_PREPARE:
//ust// 	case CPU_UP_PREPARE_FROZEN:
//ust// 		mutex_lock(&relay_channels_mutex);
//ust// 		list_for_each_entry(chan, &relay_channels, list) {
//ust// 			if (chan->buf[hotcpu])
//ust// 				continue;
//ust// 			chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
//ust// 			if (!chan->buf[hotcpu]) {
//ust// 				printk(KERN_ERR
//ust// 					"relay_hotcpu_callback: cpu %d buffer "
//ust// 					"creation failed\n", hotcpu);
//ust// 				mutex_unlock(&relay_channels_mutex);
//ust// 				return NOTIFY_BAD;
//ust// 			}
//ust// 		}
//ust// 		mutex_unlock(&relay_channels_mutex);
//ust// 		break;
//ust// 	case CPU_DEAD:
//ust// 	case CPU_DEAD_FROZEN:
//ust// 		/* No need to flush the cpu : will be flushed upon
//ust// 		 * final relay_flush() call. */
//ust// 		break;
//ust// 	}
//ust// 	return NOTIFY_OK;
//ust// }
/**
 * ltt_relay_open - create a new relay channel
 * @base_filename: base name of files to create
 * @parent: dentry of parent directory, %NULL for root directory
 * @subbuf_size: size of sub-buffers
 * @n_subbufs: number of sub-buffers
 * @cb: client callback functions
 * @private_data: user-defined data
 *
 * Returns channel pointer if successful, %NULL otherwise.
 *
 * Creates a channel buffer for each cpu using the sizes and
 * attributes specified. The created channel buffer files
 * will be named base_filename0...base_filenameN-1. File
 * permissions will be %S_IRUSR.
 */
struct rchan *ltt_relay_open(const char *base_filename,
			 struct dentry *parent,
			 size_t subbuf_size,
			 size_t n_subbufs,
			 void *private_data)
{
	struct rchan *chan;

//ust//	if (!base_filename)
//ust//		return NULL;

	if (!(subbuf_size && n_subbufs))
		return NULL;

	chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->version = LTT_RELAY_CHANNEL_VERSION;
	chan->n_subbufs = n_subbufs;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
	chan->parent = parent;
	chan->private_data = private_data;
//ust//	strlcpy(chan->base_filename, base_filename, NAME_MAX);
//ust//	setup_callbacks(chan, cb);
	kref_init(&chan->kref);

	mutex_lock(&relay_channels_mutex);
//ust//	for_each_online_cpu(i) {
	chan->buf = relay_open_buf(chan);
	if (!chan->buf)
		goto error;
//ust//	}
	list_add(&chan->list, &relay_channels);
	mutex_unlock(&relay_channels_mutex);

	return chan;

//ust// free_bufs:
//ust//	for_each_possible_cpu(i) {
//ust//		if (!chan->buf[i])
//ust//			break;
//ust//		relay_close_buf(chan->buf[i]);
//ust//	}

error:
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_open);
/**
 * ltt_relay_close - close the channel
 * @chan: the relay channel
 *
 * Closes all channel buffers and frees the channel.
 */
void ltt_relay_close(struct rchan *chan)
{
//ust//	unsigned int i;

	if (!chan)
		return;
	mutex_lock(&relay_channels_mutex);
//ust//	for_each_possible_cpu(i)
	if (chan->buf)
		relay_close_buf(chan->buf);
	list_del(&chan->list);
	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_close);
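
/*
 * Channel lifecycle sketch for the two entry points above (illustrative
 * only, never compiled here; the sizes and the NULL parent/private_data
 * arguments are arbitrary assumptions) :
 */
#if 0
	struct rchan *chan;

	/* 4 sub-buffers of 4096 bytes each, no parent dentry. */
	chan = ltt_relay_open("example", NULL, 4096, 4, NULL);
	if (chan) {
		/* ... write through chan->buf ... */
		ltt_relay_close(chan);	/* drops the last reference */
	}
#endif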
/*
 * Start iteration at the previous element. Skip the real list head.
 */
//ust// struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
//ust// 	struct buf_page *page, size_t offset, ssize_t diff_offset)
//ust// {
//ust// 	struct buf_page *iter;
//ust// 	size_t orig_iter_off;
//ust// 	unsigned int i = 0;
//ust//
//ust// 	orig_iter_off = page->offset;
//ust// 	list_for_each_entry_reverse(iter, &page->list, list) {
//ust// 		/*
//ust// 		 * Skip the real list head.
//ust// 		 */
//ust// 		if (&iter->list == &buf->pages)
//ust// 			continue;
//ust// 		i++;
//ust// 		if (offset >= iter->offset
//ust// 		    && offset < iter->offset + PAGE_SIZE) {
//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
//ust// 			if (i > 1) {
//ust// 				printk(KERN_WARNING
//ust// 					"Backward random access detected in "
//ust// 					"ltt_relay. Iterations %u, "
//ust// 					"offset %zu, orig iter->off %zu, "
//ust// 					"iter->off %zu diff_offset %zd.\n", i,
//ust// 					offset, orig_iter_off, iter->offset,
//ust// 					diff_offset);
//ust// 			}
//ust// #endif
//ust// 			return iter;
//ust// 		}
//ust// 	}
//ust// 	return NULL;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_prev_page);
/*
 * Start iteration at the next element. Skip the real list head.
 */
//ust// struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
//ust// 	struct buf_page *page, size_t offset, ssize_t diff_offset)
//ust// {
//ust// 	struct buf_page *iter;
//ust// 	unsigned int i = 0;
//ust// 	size_t orig_iter_off;
//ust//
//ust// 	orig_iter_off = page->offset;
//ust// 	list_for_each_entry(iter, &page->list, list) {
//ust// 		/*
//ust// 		 * Skip the real list head.
//ust// 		 */
//ust// 		if (&iter->list == &buf->pages)
//ust// 			continue;
//ust// 		i++;
//ust// 		if (offset >= iter->offset
//ust// 		    && offset < iter->offset + PAGE_SIZE) {
//ust// #ifdef CONFIG_LTT_RELAY_CHECK_RANDOM_ACCESS
//ust// 			if (i > 1) {
//ust// 				printk(KERN_WARNING
//ust// 					"Forward random access detected in "
//ust// 					"ltt_relay. Iterations %u, "
//ust// 					"offset %zu, orig iter->off %zu, "
//ust// 					"iter->off %zu diff_offset %zd.\n", i,
//ust// 					offset, orig_iter_off, iter->offset,
//ust// 					diff_offset);
//ust// 			}
//ust// #endif
//ust// 			return iter;
//ust// 		}
//ust// 	}
//ust// 	return NULL;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_find_next_page);
/**
 * ltt_relay_write - write data to a ltt_relay buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @cpy : number of bytes already copied by the caller's fast path
 */
void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;
		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
//ust// EXPORT_SYMBOL_GPL(_ltt_relay_write);
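
/*
 * Usage sketch for the slow path above (illustrative only, never compiled
 * here) : the caller is assumed to perform the first contiguous copy
 * itself, then hand the remainder to _ltt_relay_write() with @cpy set to
 * what was already copied, matching the do/while loop entry conditions.
 */
#if 0
	size_t cpy = min_t(size_t, len, buf->buf_size - offset);

	ltt_relay_do_copy(buf->buf_data + offset, src, cpy);
	if (unlikely(len != cpy))
		_ltt_relay_write(buf, offset, src, len, cpy);
#endif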
/**
 * ltt_relay_read - read data from ltt_relay_buffer.
 * @buf : buffer
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to read
 */
//ust// int ltt_relay_read(struct rchan_buf *buf, size_t offset,
//ust// 	void *dest, size_t len)
//ust// {
//ust// 	struct buf_page *page;
//ust// 	ssize_t pagecpy, orig_len;
//ust//
//ust// 	orig_len = len;
//ust// 	offset &= buf->chan->alloc_size - 1;
//ust// 	page = buf->rpage;
//ust// 	if (unlikely(!len))
//ust// 		return 0;
//ust// 	for (;;) {
//ust// 		page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
//ust// 		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust// 		memcpy(dest, page_address(page->page) + (offset & ~PAGE_MASK),
//ust// 			pagecpy);
//ust// 		len -= pagecpy;
//ust// 		if (likely(!len))
//ust// 			break;
//ust// 		dest += pagecpy;
//ust// 		offset += pagecpy;
//ust// 		/*
//ust// 		 * Underlying layer should never ask for reads across
//ust// 		 * subbuffers.
//ust// 		 */
//ust// 		WARN_ON(offset >= buf->chan->alloc_size);
//ust// 	}
//ust// 	return orig_len;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_read);
/**
 * ltt_relay_read_get_page - Get a whole page to read from
 * @buf : buffer
 * @offset : offset within the buffer
 */
//ust// struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf, size_t offset)
//ust// {
//ust// 	struct buf_page *page;
//ust//
//ust// 	offset &= buf->chan->alloc_size - 1;
//ust// 	page = buf->rpage;
//ust// 	page = ltt_relay_cache_page(buf, &buf->rpage, page, offset);
//ust// 	return page;
//ust// }
//ust// EXPORT_SYMBOL_GPL(ltt_relay_read_get_page);
/**
 * ltt_relay_offset_address - get address of a location within the buffer
 * @buf : buffer
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
void *ltt_relay_offset_address(struct rchan_buf *buf, size_t offset)
{
//ust//	struct buf_page *page;
//ust//	unsigned int odd;
//ust//
//ust//	offset &= buf->chan->alloc_size - 1;
//ust//	odd = !!(offset & buf->chan->subbuf_size);
//ust//	page = buf->hpage[odd];
//ust//	if (offset < page->offset || offset >= page->offset + PAGE_SIZE)
//ust//		buf->hpage[odd] = page = buf->wpage;
//ust//	page = ltt_relay_cache_page(buf, &buf->hpage[odd], page, offset);
//ust//	return page_address(page->page) + (offset & ~PAGE_MASK);
	return ((char *)buf->buf_data) + offset;
}
//ust// EXPORT_SYMBOL_GPL(ltt_relay_offset_address);
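
/*
 * Typical use, as in the buffer begin/end callbacks further down : compute
 * the address of a sub-buffer header from its index (sketch only).
 */
#if 0
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
#endif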
/**
 * relay_file_open - open file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Increments the channel buffer refcount.
 */
//ust// static int relay_file_open(struct inode *inode, struct file *filp)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	kref_get(&buf->kref);
//ust// 	filp->private_data = buf;
//ust//
//ust// 	return nonseekable_open(inode, filp);
//ust// }
/**
 * relay_file_release - release file op for relay files
 * @inode: the inode
 * @filp: the file
 *
 * Decrements the channel refcount, as the filesystem is
 * no longer using it.
 */
//ust// static int relay_file_release(struct inode *inode, struct file *filp)
//ust// {
//ust// 	struct rchan_buf *buf = filp->private_data;
//ust// 	kref_put(&buf->kref, relay_remove_buf);
//ust//
//ust// 	return 0;
//ust// }
//ust// const struct file_operations ltt_relay_file_operations = {
//ust// 	.open = relay_file_open,
//ust// 	.release = relay_file_release,
//ust// };
//ust// EXPORT_SYMBOL_GPL(ltt_relay_file_operations);
//ust// static __init int relay_init(void)
//ust// {
//ust// 	hotcpu_notifier(relay_hotcpu_callback, 5);
//ust// 	return 0;
//ust// }
//ust//
//ust// module_init(relay_init);
/*
 * (C) Copyright 2005-2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng lockless buffer space management (reader/writer).
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Inspired from LTT :
 *  Karim Yaghmour (karim@opersys.com)
 *  Tom Zanussi (zanussi@us.ibm.com)
 *  Bob Wisniewski (bob@watson.ibm.com)
 * And from K42 :
 *  Bob Wisniewski (bob@watson.ibm.com)
 *
 * Changelog:
 *  19/10/05, Complete lockless mechanism.
 *  27/05/05, Modular redesign and rewrite.
 *
 * Userspace reader semantic :
 * while (poll fd != POLLHUP) {
 *   - ioctl RELAY_GET_SUBBUF_SIZE
 *   while (get count < subbuf size) {
 *     - ioctl GET_SUBBUF
 *     - splice 1 subbuffer worth of data to a pipe
 *     - splice the data from pipe to disk/network
 *     - ioctl PUT_SUBBUF, check error value
 *       if err val < 0, previous subbuffer was corrupted.
 *   }
 * }
 */
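
/*
 * A minimal consumer implementing the semantic above could look like the
 * sketch below (illustrative only, never compiled here : error handling is
 * elided and the fd is assumed to come from opening the per-cpu channel
 * file; the ioctl request names follow the list above).
 */
#if 0
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	unsigned int subbuf_size;
	u32 consumed_old;

	ioctl(fd, RELAY_GET_SUBBUF_SIZE, &subbuf_size);
	while (poll(&pfd, 1, -1) >= 0 && !(pfd.revents & POLLHUP)) {
		if (ioctl(fd, RELAY_GET_SUBBUF, &consumed_old) < 0)
			continue;	/* no fully committed sub-buffer yet */
		/* splice one sub-buffer worth of data to a pipe,
		 * then from the pipe to disk/network */
		if (ioctl(fd, RELAY_PUT_SUBBUF, &consumed_old) < 0) {
			/* err val < 0 : previous sub-buffer was corrupted */
		}
	}
#endif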
//ust// #include <linux/time.h>
//ust// #include <linux/ltt-tracer.h>
//ust// #include <linux/ltt-relay.h>
//ust// #include <linux/module.h>
//ust// #include <linux/string.h>
//ust// #include <linux/slab.h>
//ust// #include <linux/init.h>
//ust// #include <linux/rcupdate.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/bitops.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/smp_lock.h>
//ust// #include <linux/debugfs.h>
//ust// #include <linux/stat.h>
//ust// #include <linux/cpu.h>
//ust// #include <linux/pipe_fs_i.h>
//ust// #include <linux/splice.h>
//ust// #include <asm/atomic.h>
//ust// #include <asm/local.h>
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
/* LTTng lockless logging buffer info */
struct ltt_channel_buf_struct {
	/* First 32 bytes cache-hot cacheline */
	local_t offset;			/* Current offset in the buffer */
	local_t *commit_count;		/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	unsigned long last_tsc;		/*
					 * Last timestamp written in the buffer.
					 */
	/* End of first 32 bytes cacheline */
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	local_t events_lost;
	local_t corrupted_subbuffers;
	spinlock_t full_lock;		/*
					 * buffer full condition spinlock, only
					 * for userspace tracing blocking mode
					 * synchronization with reader.
					 */
//ust//	wait_queue_head_t write_wait;	/*
//ust//					 * Wait queue for blocking user space
//ust//					 * writers
//ust//					 */
	atomic_t wakeup_readers;	/* Boolean : wakeup readers waiting ? */
} ____cacheline_aligned;
/*
 * Last TSC comparison functions. Check if the current TSC overflows
 * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
 * atomically.
 */

#if (BITS_PER_LONG == 32)
static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
}

static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);

	if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline void save_last_tsc(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)tsc;
}

static inline int last_tsc_overflow(struct ltt_channel_buf_struct *ltt_buf,
					u64 tsc)
{
	if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
		return 1;
	else
		return 0;
}
#endif
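
/*
 * Worked example for the 64-bit case (assuming LTT_TSC_BITS == 32 for
 * round numbers) : with last_tsc == 0x1000, an event at
 * tsc == 0x100001000 gives (tsc - last_tsc) >> 32 == 1, so
 * last_tsc_overflow() returns 1 and ltt_relay_try_reserve() below tags the
 * event with LTT_RFLAG_ID_SIZE_TSC to record a full timestamp. Any delta
 * smaller than 2^LTT_TSC_BITS keeps the compact timestamp encoding.
 */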
//ust// static struct file_operations ltt_file_operations;
/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan,
		struct rchan_buf *buf,
		unsigned int n_subbufs);

static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan);

static void ltt_force_switch(struct rchan_buf *buf,
		enum force_switch_mode mode);
static void ltt_buffer_begin_callback(struct rchan_buf *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->lost_size = 0xFFFFFFFF; /* for debugging */
	header->buf_size = buf->chan->subbuf_size;
	ltt_write_trace_header(channel->trace, header);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end_callback(struct rchan_buf *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = channel->buf;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
				buf->chan);
	header->cycle_count_end = tsc;
	header->events_lost = ltt_buf->events_lost;
	header->subbuf_corrupt = ltt_buf->corrupted_subbuffers;
}
static notrace void ltt_deliver(struct rchan_buf *buf, unsigned int subbuf_idx,
		void *subbuf)
{
	struct ltt_channel_struct *channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = channel->buf;

	atomic_set(&ltt_buf->wakeup_readers, 1);
}
static struct dentry *ltt_create_buf_file_callback(const char *filename,
		struct dentry *parent, int mode,
		struct rchan_buf *buf)
{
	struct ltt_channel_struct *ltt_chan;
	int err;
//ust//	struct dentry *dentry;

	ltt_chan = buf->chan->private_data;
	err = ltt_relay_create_buffer(ltt_chan->trace, ltt_chan, buf, buf->chan->n_subbufs);
	if (err)
		goto error;

//ust//	dentry = debugfs_create_file(filename, mode, parent, buf,
//ust//			&ltt_file_operations);
//ust//	if (IS_ERR(dentry))
//ust//		goto error;
//ust//	return dentry;
	return NULL;

error:
	ltt_relay_destroy_buffer(ltt_chan);
	return NULL;
}
static int ltt_remove_buf_file_callback(struct rchan_buf *buf)
{
//ust//	struct rchan_buf *buf = dentry->d_inode->i_private;
	struct ltt_channel_struct *ltt_chan = buf->chan->private_data;

//ust//	debugfs_remove(dentry);
	ltt_relay_destroy_buffer(ltt_chan);

	return 0;
}
/*
 * This must be done after the trace is removed from the RCU list so that there
 * are no stalled writers.
 */
//ust// static void ltt_relay_wake_writers(struct ltt_channel_buf_struct *ltt_buf)
//ust// {
//ust// 	if (waitqueue_active(&ltt_buf->write_wait))
//ust// 		wake_up_interruptible(&ltt_buf->write_wait);
//ust// }
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct rchan_buf *buf,
		unsigned int subbuf_idx,
		long offset)
{
//ust//	struct ltt_channel_struct *ltt_channel =
//ust//		(struct ltt_channel_struct *)buf->chan->private_data;
//ust//	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
//ust//
//ust//	ltt_relay_wake_writers(ltt_buf);
}
/**
 * ltt_open - open file op for ltt files
 * @inode: opened inode
 * @file: opened file
 *
 * Open implementation. Makes sure only one open instance of a buffer is
 * done at a given moment.
 */
//ust// static int ltt_open(struct inode *inode, struct file *file)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust//
//ust// 	if (!atomic_long_add_unless(&ltt_buf->active_readers, 1, 1))
//ust// 		return -EBUSY;
//ust// 	return ltt_relay_file_operations.open(inode, file);
//ust// }
/**
 * ltt_release - release file op for ltt files
 * @inode: opened inode
 * @file: opened file
 *
 * Release implementation.
 */
//ust// static int ltt_release(struct inode *inode, struct file *file)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	int ret;
//ust//
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	atomic_long_dec(&ltt_buf->active_readers);
//ust// 	ret = ltt_relay_file_operations.release(inode, file);
//ust// 	WARN_ON(ret);
//ust// 	return ret;
//ust// }
/**
 * ltt_poll - file op for ltt files
 * @filp: the file
 * @wait: poll table
 *
 * Poll implementation.
 */
//ust// static unsigned int ltt_poll(struct file *filp, poll_table *wait)
//ust// {
//ust// 	unsigned int mask = 0;
//ust// 	struct inode *inode = filp->f_dentry->d_inode;
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust//
//ust// 	if (filp->f_mode & FMODE_READ) {
//ust// 		poll_wait_set_exclusive(wait);
//ust// 		poll_wait(filp, &buf->read_wait, wait);
//ust//
//ust// 		WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 		if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
//ust// 				 buf->chan)
//ust// 		    - SUBBUF_TRUNC(atomic_long_read(&ltt_buf->consumed),
//ust// 				   buf->chan)
//ust// 		    == 0) {
//ust// 			if (buf->finalized)
//ust// 				return POLLHUP;
//ust// 			else
//ust// 				return 0;
//ust// 		} else {
//ust// 			struct rchan *rchan =
//ust// 				ltt_channel->trans_channel_data;
//ust// 			if (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
//ust// 					 buf->chan)
//ust// 			    - SUBBUF_TRUNC(atomic_long_read(
//ust// 						&ltt_buf->consumed),
//ust// 					   buf->chan)
//ust// 			    >= rchan->alloc_size)
//ust// 				return POLLPRI | POLLRDBAND;
//ust// 			else
//ust// 				return POLLIN | POLLRDNORM;
//ust// 		}
//ust// 	}
//ust// 	return mask;
//ust// }
static int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old)
{
	struct ltt_channel_struct *ltt_channel = (struct ltt_channel_struct *)buf->chan->private_data;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = atomic_long_read(&ltt_buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&ltt_buf->commit_count[consumed_idx]);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	smp_rmb();
	write_offset = local_read(&ltt_buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & ltt_channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> ltt_channel->n_subbufs_order)
	    != 0)
		return -EAGAIN;
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0)
		return -EAGAIN;

	*pconsumed_old = consumed_old;
	return 0;
}
static int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&ltt_buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
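
/*
 * In-process usage sketch for the two helpers above (illustrative only,
 * never compiled here) : this mirrors what the commented-out ltt_ioctl()
 * below does for the RELAY_GET_SUBBUF/RELAY_PUT_SUBBUF commands.
 */
#if 0
	long consumed_old;

	if (!ltt_do_get_subbuf(buf, ltt_buf, &consumed_old)) {
		/* ... read one sub-buffer starting at consumed_old ... */
		if (ltt_do_put_subbuf(buf, ltt_buf, (u32)consumed_old) < 0) {
			/* pushed by the writer : data just read is unreliable */
		}
	}
#endif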
/**
 * ltt_ioctl - control on the debugfs file
 *
 * @inode: the inode
 * @filp: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements four commands necessary for a minimal
 * producer/consumer implementation :
 * RELAY_GET_SUBBUF
 *	Get the next sub buffer that can be read. It never blocks.
 * RELAY_PUT_SUBBUF
 *	Release the currently read sub-buffer. Parameter is the last
 *	put subbuffer (returned by GET_SUBBUF).
 * RELAY_GET_N_SUBBUFS
 *	returns the number of sub buffers in the per cpu channel.
 * RELAY_GET_SUBBUF_SIZE
 *	returns the size of the sub buffers.
 */
//ust// static int ltt_ioctl(struct inode *inode, struct file *filp,
//ust// 		unsigned int cmd, unsigned long arg)
//ust// {
//ust// 	struct rchan_buf *buf = inode->i_private;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	u32 __user *argp = (u32 __user *)arg;
//ust//
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	switch (cmd) {
//ust// 	case RELAY_GET_SUBBUF:
//ust// 	{
//ust// 		int ret;
//ust// 		long consumed_old;
//ust// 		ret = ltt_do_get_subbuf(buf, ltt_buf, &consumed_old);
//ust// 		if (ret < 0)
//ust// 			return ret;
//ust// 		return put_user((u32)consumed_old, argp);
//ust// 	}
//ust// 	case RELAY_PUT_SUBBUF:
//ust// 	{
//ust// 		int ret;
//ust// 		u32 uconsumed_old;
//ust// 		ret = get_user(uconsumed_old, argp);
//ust// 		if (ret)
//ust// 			return ret; /* will return -EFAULT */
//ust// 		return ltt_do_put_subbuf(buf, ltt_buf, uconsumed_old);
//ust// 	}
//ust// 	case RELAY_GET_N_SUBBUFS:
//ust// 		return put_user((u32)buf->chan->n_subbufs, argp);
//ust// 	case RELAY_GET_SUBBUF_SIZE:
//ust// 		return put_user((u32)buf->chan->subbuf_size, argp);
//ust// 	default:
//ust// 		break;
//ust// 	}
//ust// 	return -ENOIOCTLCMD;
//ust// }
//ust// #ifdef CONFIG_COMPAT
//ust// static long ltt_compat_ioctl(struct file *file, unsigned int cmd,
//ust// 		unsigned long arg)
//ust// {
//ust// 	long ret = -ENOIOCTLCMD;
//ust//
//ust// 	lock_kernel();
//ust// 	ret = ltt_ioctl(file->f_dentry->d_inode, file, cmd, arg);
//ust// 	unlock_kernel();
//ust//
//ust// 	return ret;
//ust// }
//ust// #endif
//ust// static void ltt_relay_pipe_buf_release(struct pipe_inode_info *pipe,
//ust// 		struct pipe_buffer *pbuf)
//ust// {
//ust// }
//ust//
//ust// static struct pipe_buf_operations ltt_relay_pipe_buf_ops = {
//ust// 	.can_merge = 0,
//ust// 	.map = generic_pipe_buf_map,
//ust// 	.unmap = generic_pipe_buf_unmap,
//ust// 	.confirm = generic_pipe_buf_confirm,
//ust// 	.release = ltt_relay_pipe_buf_release,
//ust// 	.steal = generic_pipe_buf_steal,
//ust// 	.get = generic_pipe_buf_get,
//ust// };
//ust//
//ust// static void ltt_relay_page_release(struct splice_pipe_desc *spd, unsigned int i)
//ust// {
//ust// }
/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 */
//ust// static int subbuf_splice_actor(struct file *in,
//ust// 			       loff_t *ppos,
//ust// 			       struct pipe_inode_info *pipe,
//ust// 			       size_t len,
//ust// 			       unsigned int flags)
//ust// {
//ust// 	struct rchan_buf *buf = in->private_data;
//ust// 	struct ltt_channel_struct *ltt_channel =
//ust// 		(struct ltt_channel_struct *)buf->chan->private_data;
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_channel->buf, buf->cpu);
//ust// 	unsigned int poff, subbuf_pages, nr_pages;
//ust// 	struct page *pages[PIPE_BUFFERS];
//ust// 	struct partial_page partial[PIPE_BUFFERS];
//ust// 	struct splice_pipe_desc spd = {
//ust// 		.pages = pages,
//ust// 		.nr_pages = 0,
//ust// 		.partial = partial,
//ust// 		.flags = flags,
//ust// 		.ops = &ltt_relay_pipe_buf_ops,
//ust// 		.spd_release = ltt_relay_page_release,
//ust// 	};
//ust// 	long consumed_old, consumed_idx, roffset;
//ust// 	unsigned long bytes_avail;
//ust//
//ust// 	/*
//ust// 	 * Check that a GET_SUBBUF ioctl has been done before.
//ust// 	 */
//ust// 	WARN_ON(atomic_long_read(&ltt_buf->active_readers) != 1);
//ust// 	consumed_old = atomic_long_read(&ltt_buf->consumed);
//ust// 	consumed_old += *ppos;
//ust// 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
//ust//
//ust// 	/*
//ust// 	 * Adjust read len, if longer than what is available
//ust// 	 */
//ust// 	bytes_avail = SUBBUF_TRUNC(local_read(&ltt_buf->offset), buf->chan)
//ust// 		    - consumed_old;
//ust// 	WARN_ON(bytes_avail > buf->chan->alloc_size);
//ust// 	len = min_t(size_t, len, bytes_avail);
//ust// 	subbuf_pages = bytes_avail >> PAGE_SHIFT;
//ust// 	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
//ust// 	roffset = consumed_old & PAGE_MASK;
//ust// 	poff = consumed_old & ~PAGE_MASK;
//ust// 	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
//ust// 		   len, (ssize_t)*ppos, local_read(&ltt_buf->offset));
//ust//
//ust// 	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
//ust// 		unsigned int this_len;
//ust// 		struct buf_page *page;
//ust//
//ust// 		if (!len)
//ust// 			break;
//ust// 		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
//ust// 			   len, roffset);
//ust//
//ust// 		this_len = PAGE_SIZE - poff;
//ust// 		page = ltt_relay_read_get_page(buf, roffset);
//ust// 		spd.pages[spd.nr_pages] = page->page;
//ust// 		spd.partial[spd.nr_pages].offset = poff;
//ust// 		spd.partial[spd.nr_pages].len = this_len;
//ust//
//ust// 		poff = 0;
//ust// 		roffset += PAGE_SIZE;
//ust// 		len -= this_len;
//ust// 	}
//ust//
//ust// 	if (!spd.nr_pages)
//ust// 		return 0;
//ust//
//ust// 	return splice_to_pipe(pipe, &spd);
//ust// }
//ust// static ssize_t ltt_relay_file_splice_read(struct file *in,
//ust// 					  loff_t *ppos,
//ust// 					  struct pipe_inode_info *pipe,
//ust// 					  size_t len,
//ust// 					  unsigned int flags)
//ust// {
//ust// 	ssize_t spliced;
//ust// 	int ret;
//ust//
//ust// 	ret = 0;
//ust// 	spliced = 0;
//ust//
//ust// 	printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n",
//ust// 		   len, (ssize_t)*ppos);
//ust// 	while (len && !spliced) {
//ust// 		ret = subbuf_splice_actor(in, ppos, pipe, len, flags);
//ust// 		printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
//ust// 		if (ret < 0)
//ust// 			break;
//ust// 		else if (!ret) {
//ust// 			if (flags & SPLICE_F_NONBLOCK)
//ust// 				ret = -EAGAIN;
//ust// 			break;
//ust// 		}
//ust//
//ust// 		*ppos += ret;
//ust// 		if (ret > len)
//ust// 			len = 0;
//ust// 		else
//ust// 			len -= ret;
//ust// 		spliced += ret;
//ust// 	}
//ust//
//ust// 	if (spliced)
//ust// 		return spliced;
//ust//
//ust// 	return ret;
//ust// }
static void ltt_relay_print_subbuffer_errors(
		struct ltt_channel_struct *ltt_chan,
		long cons_off)
{
	struct rchan *rchan = ltt_chan->trans_channel_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	long cons_idx, commit_count, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, rchan);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	printk(KERN_WARNING
	       "LTT : unread channel %s offset is %ld "
	       "and cons_off : %ld\n",
	       ltt_chan->channel_name, write_offset, cons_off);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - rchan->subbuf_size) & ltt_chan->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, rchan) >> ltt_chan->n_subbufs_order)
	    != 0)
		printk(KERN_ALERT
		       "LTT : %s : subbuffer %lu has non filled "
		       "commit count %lu.\n",
		       ltt_chan->channel_name, cons_idx, commit_count);
	printk(KERN_ALERT "LTT : %s : commit count : %lu, subbuf size %zd\n",
	       ltt_chan->channel_name, commit_count,
	       rchan->subbuf_size);
}
static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan)
{
	struct rchan *rchan = ltt_chan->trans_channel_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	long cons_off;

	for (cons_off = atomic_long_read(&ltt_buf->consumed);
	     (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
			   rchan)
	      - cons_off) > 0;
	     cons_off = SUBBUF_ALIGN(cons_off, rchan))
		ltt_relay_print_subbuffer_errors(ltt_chan, cons_off);
}
static void ltt_relay_print_buffer_errors(struct ltt_channel_struct *ltt_chan)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;

	if (local_read(&ltt_buf->events_lost))
		printk(KERN_ALERT
		       "LTT : %s : %ld events lost "
		       "in %s channel\n",
		       ltt_chan->channel_name,
		       local_read(&ltt_buf->events_lost),
		       ltt_chan->channel_name);
	if (local_read(&ltt_buf->corrupted_subbuffers))
		printk(KERN_ALERT
		       "LTT : %s : %ld corrupted subbuffers "
		       "in %s channel\n",
		       ltt_chan->channel_name,
		       local_read(&ltt_buf->corrupted_subbuffers),
		       ltt_chan->channel_name);

	ltt_relay_print_errors(trace, ltt_chan);
}
static void ltt_relay_remove_dirs(struct ltt_trace_struct *trace)
{
//ust//	debugfs_remove(trace->dentry.trace_root);
}
static void ltt_relay_release_channel(struct kref *kref)
{
	struct ltt_channel_struct *ltt_chan = container_of(kref,
			struct ltt_channel_struct, kref);
	free(ltt_chan->buf);
}
/*
 * Create ltt buffer.
 */
//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
//ust// 		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust// 		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_chan->buf, cpu);
//ust// 	unsigned int j;
//ust//
//ust// 	ltt_buf->commit_count =
//ust// 		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust// 			GFP_KERNEL, cpu_to_node(cpu));
//ust// 	if (!ltt_buf->commit_count)
//ust// 		return -ENOMEM;
//ust// 	kref_get(&trace->kref);
//ust// 	kref_get(&trace->ltt_transport_kref);
//ust// 	kref_get(&ltt_chan->kref);
//ust// 	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust// 	atomic_long_set(&ltt_buf->consumed, 0);
//ust// 	atomic_long_set(&ltt_buf->active_readers, 0);
//ust// 	for (j = 0; j < n_subbufs; j++)
//ust// 		local_set(&ltt_buf->commit_count[j], 0);
//ust// 	init_waitqueue_head(&ltt_buf->write_wait);
//ust// 	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust// 	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust// 	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust// 	/* atomic_add made on local variable on data that belongs to
//ust// 	 * various CPUs : ok because tracing not started (for this cpu). */
//ust// 	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust// 	local_set(&ltt_buf->events_lost, 0);
//ust// 	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust// 	return 0;
//ust// }
static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
		unsigned int n_subbufs)
{
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
	unsigned int j;

	ltt_buf->commit_count =
		malloc(sizeof(ltt_buf->commit_count) * n_subbufs);
	if (!ltt_buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	ltt_buf->offset = ltt_subbuffer_header_size();
	atomic_long_set(&ltt_buf->consumed, 0);
	atomic_long_set(&ltt_buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++)
		local_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
	atomic_set(&ltt_buf->wakeup_readers, 0);
	spin_lock_init(&ltt_buf->full_lock);

	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);

	ltt_buf->commit_count[0] += ltt_subbuffer_header_size();

	ltt_buf->events_lost = 0;
	ltt_buf->corrupted_subbuffers = 0;

	return 0;
}
static void ltt_relay_destroy_buffer(struct ltt_channel_struct *ltt_chan)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		 ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}
/*
 * Create channel.
 */
static int ltt_relay_create_channel(const char *trace_name,
		struct ltt_trace_struct *trace, struct dentry *dir,
		const char *channel_name, struct ltt_channel_struct *ltt_chan,
		unsigned int subbuf_size, unsigned int n_subbufs,
		int overwrite)
{
	char *tmpname;
	unsigned int tmpname_len;
	int err = 0;

	tmpname = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!tmpname)
		return EPERM;
	if (overwrite) {
		strncpy(tmpname, LTT_FLIGHT_PREFIX, PATH_MAX-1);
		strncat(tmpname, channel_name,
			PATH_MAX-1-sizeof(LTT_FLIGHT_PREFIX));
	} else {
		strncpy(tmpname, channel_name, PATH_MAX-1);
	}
	strncat(tmpname, "_", PATH_MAX-1-strlen(tmpname));

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->buffer_begin = ltt_buffer_begin_callback;
	ltt_chan->buffer_end = ltt_buffer_end_callback;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
	if (!ltt_chan->buf)
		goto alloc_error;
	ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
			dir,
			subbuf_size,
			n_subbufs,
			ltt_chan);
	tmpname_len = strlen(tmpname);
	if (tmpname_len > 0) {
		/* Remove final _ for pretty printing */
		tmpname[tmpname_len-1] = '\0';
	}
	if (ltt_chan->trans_channel_data == NULL) {
		printk(KERN_ERR "LTT : Can't open %s channel for trace %s\n",
			tmpname, trace_name);
		goto relay_open_error;
	}

	err = 0;
	goto end;

relay_open_error:
//ust//	percpu_free(ltt_chan->buf);

alloc_error:
	err = EPERM;

end:
	kfree(tmpname);
	return err;
}
static int ltt_relay_create_dirs(struct ltt_trace_struct *new_trace)
{
//ust//	new_trace->dentry.trace_root = debugfs_create_dir(new_trace->trace_name,
//ust//			get_ltt_root());
//ust//	if (new_trace->dentry.trace_root == NULL) {
//ust//		printk(KERN_ERR "LTT : Trace directory name %s already taken\n",
//ust//			new_trace->trace_name);
//ust//		return EEXIST;
//ust//	}
//ust//
//ust//	new_trace->callbacks.create_buf_file = ltt_create_buf_file_callback;
//ust//	new_trace->callbacks.remove_buf_file = ltt_remove_buf_file_callback;

	return 0;
}
/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct rchan_buf *buf)
{
	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);
}
static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}
static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
{
	struct rchan *rchan = ltt_channel->trans_channel_data;

	if (rchan->buf) {
		struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
		ltt_relay_buffer_flush(rchan->buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
	}
}
static void ltt_relay_finish_channel(struct ltt_channel_struct *ltt_channel)
{
//ust//	unsigned int i;

//ust//	for_each_possible_cpu(i)
	ltt_relay_finish_buffer(ltt_channel);
}
static void ltt_relay_remove_channel(struct ltt_channel_struct *channel)
{
	struct rchan *rchan = channel->trans_channel_data;

	ltt_relay_close(rchan);
	kref_put(&channel->kref, ltt_relay_release_channel);
}
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	long commit_count, reserve_commit_diff;
	size_t before_hdr_pad, size;
};
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_reserve(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	offsets->begin = local_read(&ltt_buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(ltt_buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ltt_get_header_size(ltt_channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (offsets->begin_switch) {
		long subbuf_index;

		if (offsets->end_switch_old)
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		offsets->reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> ltt_channel->n_subbufs_order)
			- (local_read(&ltt_buf->commit_count[subbuf_index])
				& ltt_channel->commit_count_mask);
		if (offsets->reserve_commit_diff == 0) {
			/* Next buffer not corrupted. */
			if (!ltt_channel->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(atomic_long_read(
							&ltt_buf->consumed),
						buf->chan))
				>= rchan->alloc_size) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				local_inc(&ltt_buf->events_lost);
				return -1;
			}
			/*
			 * next buffer not corrupted, we are either in
			 * overwrite mode or the buffer is not full.
			 * It's safe to write in this new subbuffer.
			 */
		} else {
			/*
			 * Next subbuffer corrupted. Force pushing reader even
			 * in normal mode. It's safe to write in this new
			 * subbuffer.
			 */
		}
		offsets->size = ltt_get_header_size(ltt_channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&ltt_buf->events_lost);
			return -1;
		}
		/*
		 * We just made a successful buffer switch and the event
		 * fits in the new subbuffer. Let's write.
		 */
	}
	/*
	 * Event fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	offsets->end = offsets->begin + offsets->size;

	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_switch(
		enum force_switch_mode mode,
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;

	offsets->begin = local_read(&ltt_buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	offsets->reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> ltt_channel->n_subbufs_order)
		- (local_read(&ltt_buf->commit_count[subbuf_index])
			& ltt_channel->commit_count_mask);
	if (offsets->reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !ltt_channel->overwrite
		    && offsets->begin - atomic_long_read(&ltt_buf->consumed)
		       >= rchan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
static inline void ltt_reserve_push_reader(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf,
		struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets)
{
	long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&ltt_buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer, the writer being at the farthest
		 * write position sub-buffer index in the buffer being the one
		 * which will win this loop.
		 * If the buffer is not in overwrite mode, pushing the reader
		 * only happens if a sub-buffer is corrupted.
		 */
		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
		     - SUBBUF_TRUNC(consumed_old, buf->chan))
		    >= rchan->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while (atomic_long_cmpxchg(&ltt_buf->consumed, consumed_old,
			consumed_new) != consumed_old);

	if (consumed_old != consumed_new) {
		/*
		 * Reader pushed : we are the winner of the push, we can
		 * therefore reequilibrate reserve and commit. Atomic increment
		 * of the commit count permits other writers to play around
		 * with this variable before us. We keep track of
		 * corrupted_subbuffers even in overwrite mode :
		 * we never want to write over a non completely committed
		 * sub-buffer : possible causes : the buffer size is too low
		 * compared to the unordered data input, or there is a writer
		 * that died between the reserve and the commit.
		 */
		if (offsets->reserve_commit_diff) {
			/*
			 * We have to alter the sub-buffer commit count.
			 * We do not deliver the previous subbuffer, given it
			 * was either corrupted or not consumed (overwrite
			 * mode).
			 */
			local_add(offsets->reserve_commit_diff,
				  &ltt_buf->commit_count[
					SUBBUF_INDEX(offsets->begin,
						     buf->chan)]);
			if (!ltt_channel->overwrite
			    || offsets->reserve_commit_diff
			       != rchan->subbuf_size) {
				/*
				 * The reserve commit diff was not subbuf_size :
				 * it means the subbuffer was partly written to
				 * and is therefore corrupted. If it is multiple
				 * of subbuffer size and we are in flight
				 * recorder mode, we are skipping over a whole
				 * subbuffer.
				 */
				local_inc(&ltt_buf->corrupted_subbuffers);
			}
		}
	}
}
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static inline void ltt_reserve_switch_old_subbuf(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, rchan);

	ltt_channel->buffer_end(buf, *tsc, offsets->old, oldidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(rchan->subbuf_size
				 - (SUBBUF_OFFSET(offsets->old - 1, rchan)
				 + 1),
				 &ltt_buf->commit_count[oldidx]);
	if ((BUFFER_TRUNC(offsets->old - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, oldidx, NULL);
}
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static inline void ltt_reserve_switch_new_subbuf(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, rchan);

	ltt_channel->buffer_begin(buf, *tsc, beginidx);
	/* Must write buffer begin before incrementing commit count */
	smp_wmb();
	offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
			&ltt_buf->commit_count[beginidx]);
	/* Check if the written buffer has to be delivered */
	if ((BUFFER_TRUNC(offsets->begin, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, beginidx, NULL);
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static inline void ltt_reserve_end_switch_current(
		struct ltt_channel_struct *ltt_channel,
		struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
		struct rchan_buf *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, rchan);

	ltt_channel->buffer_end(buf, *tsc, offsets->end, endidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(rchan->subbuf_size
				 - (SUBBUF_OFFSET(offsets->end - 1, rchan)
				 + 1),
				 &ltt_buf->commit_count[endidx]);
	if ((BUFFER_TRUNC(offsets->end - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((offsets->commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, NULL);
}
/*
 * ltt_relay_reserve_slot - Atomic slot reservation in an LTTng buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset: pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Returns -ENOSPC if not enough space, else 0.
 * It will take care of sub-buffer switching.
 */
static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
		struct ltt_channel_struct *ltt_channel, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align, int cpu)
{
	struct rchan *rchan = ltt_channel->trans_channel_data;
	struct rchan_buf *buf = *transport_data = rchan->buf;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct ltt_reserve_switch_offsets offsets;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	if (ltt_nesting > 4) {
		local_inc(&ltt_buf->events_lost);
		return -EPERM;
	}
	do {
		if (ltt_relay_try_reserve(ltt_channel, ltt_buf,
				rchan, buf, &offsets, data_size, tsc, rflags,
				largest_align))
			return -ENOSPC;
	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
			offsets.end) != offsets.old);
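	/*
	 * Note: ltt_relay_try_reserve() only computes the would-be offsets;
	 * nothing is published until the local_cmpxchg() above moves
	 * ltt_buf->offset from offsets.old to offsets.end. If another
	 * (nested) writer raced us in between, the cmpxchg fails and the
	 * offsets are recomputed against the new buffer state.
	 */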
	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full
	 * TSC events, never the opposite (missing a full TSC event when it
	 * would be needed).
	 */
	save_last_tsc(ltt_buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
			&offsets, tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (offsets.begin_switch)
		ltt_reserve_switch_new_subbuf(ltt_channel, ltt_buf, rchan,
			buf, &offsets, tsc);

	if (offsets.end_switch_current)
		ltt_reserve_end_switch_current(ltt_channel, ltt_buf, rchan,
			buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant: it can be called while tracing is active with
 * absolutely no lock held.
 *
 * Note, however, that as a local_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
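/*
 * Reading aid: only mode == FORCE_ACTIVE pushes the reader and populates a
 * new sub-buffer below; a passive flush merely closes the sub-buffer being
 * written so that a reader can consume it.
 */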
static notrace void ltt_force_switch(struct rchan_buf *buf,
		enum force_switch_mode mode)
{
	struct ltt_channel_struct *ltt_channel =
			(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct rchan *rchan = ltt_channel->trans_channel_data;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch(mode, ltt_channel, ltt_buf,
				rchan, buf, &offsets, &tsc))
			return;
	} while (local_cmpxchg(&ltt_buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full
	 * TSC events, never the opposite (missing a full TSC event when it
	 * would be needed).
	 */
	save_last_tsc(ltt_buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_push_reader(ltt_channel, ltt_buf, rchan,
				buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(ltt_channel, ltt_buf, rchan, buf,
			&offsets, &tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(ltt_channel,
			ltt_buf, rchan, buf, &offsets, &tsc);
}
/*
 * For flight recording. Must be called after relay_commit.
 * This function decrements the subbuffer's lost_size each time the commit
 * count reaches back the reserve offset (modulo subbuffer size). It is useful
 * for crash dumps.
 * We use slot_size - 1 to make sure we deal correctly with the case where we
 * fill the subbuffer completely (so the subbuf index stays in the previous
 * subbuffer).
 */
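/*
 * Worked example (illustrative numbers): with a 4096-byte sub-buffer,
 * suppose a slot ends at offset 1024 within the sub-buffer and the commit
 * count has also reached 1024 (mod 4096), i.e. everything up to the slot end
 * is committed. lost_size is then lowered to 4096 - 1024 = 3072: the bytes a
 * post-crash extraction would not find in this sub-buffer. As later commits
 * fill the sub-buffer, lost_size shrinks toward the final padding size.
 */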
#ifdef CONFIG_LTT_VMCORE
static inline void ltt_write_commit_counter(struct rchan_buf *buf,
		long buf_offset, size_t slot_size)
{
	struct ltt_channel_struct *ltt_channel =
		(struct ltt_channel_struct *)buf->chan->private_data;
	struct ltt_channel_buf_struct *ltt_buf =
		percpu_ptr(ltt_channel->buf, buf->cpu);
	struct ltt_subbuffer_header *header;
	long offset, subbuf_idx, commit_count;
	uint32_t lost_old, lost_new;

	subbuf_idx = SUBBUF_INDEX(buf_offset - 1, buf->chan);
	offset = buf_offset + slot_size;
	header = (struct ltt_subbuffer_header *)
			ltt_relay_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	for (;;) {
		lost_old = header->lost_size;
		commit_count =
			local_read(&ltt_buf->commit_count[subbuf_idx]);
		/* SUBBUF_OFFSET includes commit_count_mask */
		if (!SUBBUF_OFFSET(offset - commit_count, buf->chan)) {
			lost_new = (uint32_t)buf->chan->subbuf_size
				   - SUBBUF_OFFSET(commit_count, buf->chan);
			lost_old = cmpxchg_local(&header->lost_size, lost_old,
						lost_new);
			if (lost_old <= lost_new)
				break;
		} else
			break;
	}
}
#else
static inline void ltt_write_commit_counter(struct rchan_buf *buf,
		long buf_offset, size_t slot_size)
{
}
#endif
/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @ltt_channel: channel structure
 * @transport_data: transport-specific data
 * @buf_offset: offset following the event header.
 * @slot_size: size of the reserved slot.
 */
static notrace void ltt_relay_commit_slot(
		struct ltt_channel_struct *ltt_channel,
		void **transport_data, long buf_offset, size_t slot_size)
{
	struct rchan_buf *buf = *transport_data;
	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
	struct rchan *rchan = buf->chan;
	long offset_end = buf_offset;
	long endidx = SUBBUF_INDEX(offset_end - 1, rchan);
	long commit_count;

	/* Must write slot data before incrementing commit count */
	smp_wmb();
	commit_count = local_add_return(slot_size,
			&ltt_buf->commit_count[endidx]);
	/* Check if all commits have been done */
	if ((BUFFER_TRUNC(offset_end - 1, rchan)
			>> ltt_channel->n_subbufs_order)
			- ((commit_count - rchan->subbuf_size)
			   & ltt_channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, NULL);
	/*
	 * Update lost_size for each commit. It's needed only for extracting
	 * ltt buffers from vmcore, after crash.
	 */
	ltt_write_commit_counter(buf, buf_offset, slot_size);
}
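/*
 * Usage sketch for the reserve/commit pair (hypothetical caller; the real
 * serialization code lives in the tracer, not in this file):
 *
 *	if (ltt_relay_reserve_slot(trace, chan, &tdata, data_size, &slot_size,
 *			&buf_offset, &tsc, &rflags, align, cpu))
 *		return;		// no space or nesting too deep: event lost
 *	// write the event header and payload, advancing buf_offset
 *	ltt_relay_commit_slot(chan, &tdata, buf_offset, slot_size);
 */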
/*
 * This is called with preemption disabled when user space has requested
 * blocking mode. If one of the active traces has free space below a
 * specific threshold value, we reenable preemption and block.
 */
static int ltt_relay_user_blocking(struct ltt_trace_struct *trace,
		unsigned int chan_index, size_t data_size,
		struct user_dbg_data *dbg)
{
//ust//	struct rchan *rchan;
//ust//	struct ltt_channel_buf_struct *ltt_buf;
//ust//	struct ltt_channel_struct *channel;
//ust//	struct rchan_buf *relay_buf;
//ust//	int cpu;
//ust//	DECLARE_WAITQUEUE(wait, current);

//ust//	channel = &trace->channels[chan_index];
//ust//	rchan = channel->trans_channel_data;
//ust//	cpu = smp_processor_id();
//ust//	relay_buf = rchan->buf[cpu];
//ust//	ltt_buf = percpu_ptr(channel->buf, cpu);

//ust//	/*
//ust//	 * Check if data is too big for the channel : do not
//ust//	 * block for it.
//ust//	 */
//ust//	if (LTT_RESERVE_CRITICAL + data_size > relay_buf->chan->subbuf_size)
//ust//		return 0;

//ust//	/*
//ust//	 * If free space too low, we block. We restart from the
//ust//	 * beginning after we resume (cpu id may have changed
//ust//	 * while preemption is active).
//ust//	 */
//ust//	spin_lock(&ltt_buf->full_lock);
//ust//	if (!channel->overwrite) {
//ust//		dbg->write = local_read(&ltt_buf->offset);
//ust//		dbg->read = atomic_long_read(&ltt_buf->consumed);
//ust//		dbg->avail_size = dbg->write + LTT_RESERVE_CRITICAL + data_size
//ust//				  - SUBBUF_TRUNC(dbg->read,
//ust//						 relay_buf->chan);
//ust//		if (dbg->avail_size > rchan->alloc_size) {
//ust//			__set_current_state(TASK_INTERRUPTIBLE);
//ust//			add_wait_queue(&ltt_buf->write_wait, &wait);
//ust//			spin_unlock(&ltt_buf->full_lock);
//ust//			preempt_enable();
//ust//			schedule();
//ust//			__set_current_state(TASK_RUNNING);
//ust//			remove_wait_queue(&ltt_buf->write_wait, &wait);
//ust//			if (signal_pending(current))
//ust//				return -ERESTARTSYS;
//ust//			preempt_disable();
//ust//			spin_lock(&ltt_buf->full_lock);
//ust//		}
//ust//	}
//ust//	spin_unlock(&ltt_buf->full_lock);

	return 0;
}
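/*
 * Note: the blocking path above is kernel-only and is compiled out in UST
 * (the //ust// comments); the userspace port currently never blocks and
 * always returns 0.
 */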
static void ltt_relay_print_user_errors(struct ltt_trace_struct *trace,
		unsigned int chan_index, size_t data_size,
		struct user_dbg_data *dbg, int cpu)
{
	struct rchan *rchan;
	struct ltt_channel_buf_struct *ltt_buf;
	struct ltt_channel_struct *channel;
	struct rchan_buf *relay_buf;

	channel = &trace->channels[chan_index];
	rchan = channel->trans_channel_data;
	relay_buf = rchan->buf;
	ltt_buf = channel->buf;

	printk(KERN_ERR "Error in LTT usertrace : "
			"buffer full : event lost in blocking "
			"mode. Increase LTT_RESERVE_CRITICAL.\n");
	printk(KERN_ERR "LTT nesting level is %u.\n", ltt_nesting);
	printk(KERN_ERR "LTT avail size %lu.\n",
			dbg->avail_size);
	printk(KERN_ERR "avail write : %lu, read : %lu\n",
			dbg->write, dbg->read);

	dbg->write = local_read(&ltt_buf->offset);
	dbg->read = atomic_long_read(&ltt_buf->consumed);

	printk(KERN_ERR "LTT cur size %lu.\n",
			dbg->write + LTT_RESERVE_CRITICAL + data_size
			- SUBBUF_TRUNC(dbg->read, relay_buf->chan));
	printk(KERN_ERR "cur write : %lu, read : %lu\n",
			dbg->write, dbg->read);
}
//ust// static struct ltt_transport ltt_relay_transport = {
//ust//	.name = "relay",
//ust//	.owner = THIS_MODULE,
//ust//	.ops = {
//ust//		.create_dirs = ltt_relay_create_dirs,
//ust//		.remove_dirs = ltt_relay_remove_dirs,
//ust//		.create_channel = ltt_relay_create_channel,
//ust//		.finish_channel = ltt_relay_finish_channel,
//ust//		.remove_channel = ltt_relay_remove_channel,
//ust//		.wakeup_channel = ltt_relay_async_wakeup_chan,
//ust//		.commit_slot = ltt_relay_commit_slot,
//ust//		.reserve_slot = ltt_relay_reserve_slot,
//ust//		.user_blocking = ltt_relay_user_blocking,
//ust//		.user_errors = ltt_relay_print_user_errors,
//ust//	},
//ust// };
static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
//ust//	.owner = THIS_MODULE,
	.ops = {
		.create_dirs = ltt_relay_create_dirs,
		.remove_dirs = ltt_relay_remove_dirs,
		.create_channel = ltt_relay_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
		.commit_slot = ltt_relay_commit_slot,
		.reserve_slot = ltt_relay_reserve_slot,
		.user_blocking = ltt_relay_user_blocking,
		.user_errors = ltt_relay_print_user_errors,
	},
};
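/*
 * A trace selects its transport by name at creation time; registering the
 * structure above (see init_ustrelay_transport() below) is what makes this
 * transport available to newly created traces.
 */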
//ust// static int __init ltt_relay_init(void)
//ust// {
//ust//	printk(KERN_INFO "LTT : ltt-relay init\n");
//ust//
//ust//	ltt_file_operations = ltt_relay_file_operations;
//ust//	ltt_file_operations.owner = THIS_MODULE;
//ust//	ltt_file_operations.open = ltt_open;
//ust//	ltt_file_operations.release = ltt_release;
//ust//	ltt_file_operations.poll = ltt_poll;
//ust//	ltt_file_operations.splice_read = ltt_relay_file_splice_read;
//ust//	ltt_file_operations.ioctl = ltt_ioctl;
//ust//#ifdef CONFIG_COMPAT
//ust//	ltt_file_operations.compat_ioctl = ltt_compat_ioctl;
//ust//#endif
//ust//
//ust//	ltt_transport_register(&ltt_relay_transport);
//ust//
//ust//	return 0;
//ust// }
void init_ustrelay_transport(void)
{
	ltt_transport_register(&ust_relay_transport);
}
static void __exit ltt_relay_exit(void)
{
//ust//	printk(KERN_INFO "LTT : ltt-relay exit\n");

	ltt_transport_unregister(&ust_relay_transport);
}
//ust// module_init(ltt_relay_init);
//ust// module_exit(ltt_relay_exit);

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Lockless Relay");