relay.h: fix strict aliasing warning
[lttng-ust.git] / libust / relay.h
CommitLineData
bb07823d
PMF
1/*
2 * linux/include/linux/ltt-relay.h
3 *
4 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
6 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 *
8 * CONFIG_RELAY definitions and declarations
9 */
10
11#ifndef _LINUX_LTT_RELAY_H
12#define _LINUX_LTT_RELAY_H
13
14//ust// #include <linux/types.h>
15//ust// #include <linux/sched.h>
16//ust// #include <linux/timer.h>
17//ust// #include <linux/wait.h>
18//ust// #include <linux/list.h>
19//ust// #include <linux/fs.h>
20//ust// #include <linux/poll.h>
21//ust// #include <linux/kref.h>
22//ust// #include <linux/mm.h>
23//ust// #include <linux/ltt-core.h>
64d9b80b 24#include <assert.h>
79d4d545 25#include <kcompat/kref.h>
769d0157 26//#include "list.h"
8431032f 27#include "channels.h"
7add3898 28#include "buffer.h"
bb07823d
PMF
29
/*
 * Round (x) up to a whole number of pages: (((x) - 1) & PAGE_MASK) clamps
 * down to the containing page boundary, then one full PAGE_SIZE is added.
 * NOTE(review): behavior for x == 0 depends on size_t wraparound — confirm
 * callers never pass 0.
 */
/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)

/*
 * Tracks changes to rchan/rchan_buf structs
 */
#define LTT_RELAY_CHANNEL_VERSION 8

struct rchan_buf;
39
/*
 * One page of a relay channel buffer: linked into the buffer's page list
 * and tagged with its byte offset within the buffer.
 */
struct buf_page {
	struct page *page;
	struct rchan_buf *buf;		/* buffer the page belongs to */
	size_t offset;			/* page offset in the buffer */
	struct list_head list;		/* buffer linked list */
};
46
/*
 * Per-cpu relay channel buffer.
 *
 * In the ust port the kernel page list is replaced by a flat memory
 * mapping (buf_data/buf_size) backed by SysV shared memory (shmid);
 * the original kernel fields are kept as //ust// comments for reference.
 */
struct rchan_buf {
	struct rchan *chan;		/* associated channel */
//ust//	wait_queue_head_t read_wait;	/* reader wait queue */
//ust//	struct timer_list timer;	/* reader wake-up timer */
//ust//	struct dentry *dentry;		/* channel file dentry */
	struct kref kref;		/* channel buffer refcount */
//ust//	struct list_head pages;		/* list of buffer pages */
	void *buf_data;			/* flat buffer memory (ust) */
	size_t buf_size;		/* size of buf_data in bytes */
//ust//	struct buf_page *wpage;		/* current write page (cache) */
//ust//	struct buf_page *hpage[2];	/* current subbuf header page (cache) */
//ust//	struct buf_page *rpage;		/* current subbuf read page (cache) */
//ust//	unsigned int page_count;	/* number of current buffer pages */
	unsigned int finalized;		/* buffer has been finalized */
//ust//	unsigned int cpu;		/* this buf's cpu */
	int shmid;			/* the shmid of the buffer data pages */
} ____cacheline_aligned;
67
/*
 * Relay channel data structure.
 * Owns the client callbacks, the sub-buffer geometry, and (in the ust
 * port) a single channel buffer instead of the kernel's per-cpu array.
 */
struct rchan {
	u32 version;			/* the version of this struct */
	size_t subbuf_size;		/* sub-buffer size */
	size_t n_subbufs;		/* number of sub-buffers per buffer */
	size_t alloc_size;		/* total buffer size allocated */
	struct rchan_callbacks *cb;	/* client callbacks */
	struct kref kref;		/* channel refcount */
	void *private_data;		/* for user-defined data */
//ust//	struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
	struct rchan_buf *buf;		/* the channel buffer (single in ust) */
	struct list_head list;		/* for channel list */
	struct dentry *parent;		/* parent dentry passed to open */
	int subbuf_size_order;		/* order of sub-buffer size */
//ust//	char base_filename[NAME_MAX];	/* saved base filename */
};
86
/*
 * Relay channel client callbacks, supplied by the tracer client at
 * channel-open time.
 */
struct rchan_callbacks {
	/*
	 * subbuf_start - called on buffer-switch to a new sub-buffer
	 * @buf: the channel buffer containing the new sub-buffer
	 * @subbuf: the start of the new sub-buffer
	 * @prev_subbuf: the start of the previous sub-buffer
	 * @prev_padding: unused space at the end of previous sub-buffer
	 *
	 * The client should return 1 to continue logging, 0 to stop
	 * logging.
	 *
	 * NOTE: subbuf_start will also be invoked when the buffer is
	 * created, so that the first sub-buffer can be initialized
	 * if necessary. In this case, prev_subbuf will be NULL.
	 *
	 * NOTE: the client can reserve bytes at the beginning of the new
	 * sub-buffer by calling subbuf_start_reserve() in this callback.
	 */
	int (*subbuf_start) (struct rchan_buf *buf,
			     void *subbuf,
			     void *prev_subbuf,
			     size_t prev_padding);

	/*
	 * create_buf_file - create file to represent a relay channel buffer
	 * @filename: the name of the file to create
	 * @parent: the parent of the file to create
	 * @mode: the mode of the file to create
	 * @buf: the channel buffer
	 *
	 * Called during relay_open(), once for each per-cpu buffer,
	 * to allow the client to create a file to be used to
	 * represent the corresponding channel buffer. If the file is
	 * created outside of relay, the parent must also exist in
	 * that filesystem.
	 *
	 * The callback should return the dentry of the file created
	 * to represent the relay buffer.
	 *
	 * Setting the is_global outparam to a non-zero value will
	 * cause relay_open() to create a single global buffer rather
	 * than the default set of per-cpu buffers.
	 *
	 * See Documentation/filesystems/relayfs.txt for more info.
	 */
	struct dentry *(*create_buf_file)(const char *filename,
					  struct dentry *parent,
					  int mode,
					  struct rchan_buf *buf);

	/*
	 * remove_buf_file - remove file representing a relay channel buffer
	 * @dentry: the dentry of the file to remove
	 *
	 * Called during relay_close(), once for each per-cpu buffer,
	 * to allow the client to remove a file used to represent a
	 * channel buffer.
	 *
	 * The callback should return 0 if successful, negative if not.
	 *
	 * NOTE(review): disabled in the ust port.
	 */
//ust//	int (*remove_buf_file)(struct rchan_buf *buf);
};
152
/* Search backward from "page" for the page containing "offset"
 * (used by ltt_relay_cache_page() when the distance is negative). */
extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
		struct buf_page *page, size_t offset, ssize_t diff_offset);

/* Search forward from "page" for the page containing "offset"
 * (used by ltt_relay_cache_page() when the distance is >= PAGE_SIZE). */
extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
		struct buf_page *page, size_t offset, ssize_t diff_offset);

/* Slow path of ltt_relay_write(): finishes a write of "len" bytes of
 * which the first "cpy" bytes were already copied by the fast path. */
extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
		const void *src, size_t len, ssize_t cpy);

/* Copy "len" bytes at buffer position "offset" into "dest". */
extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
		void *dest, size_t len);

/* Return the buf_page containing "offset", for reading. */
extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
		size_t offset);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *ltt_relay_offset_address(struct rchan_buf *buf,
		size_t offset);
176
177/*
178 * Find the page containing "offset". Cache it if it is after the currently
179 * cached page.
180 */
181static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
182 struct buf_page **page_cache,
183 struct buf_page *page, size_t offset)
184{
185 ssize_t diff_offset;
186 ssize_t half_buf_size = buf->chan->alloc_size >> 1;
187
188 /*
189 * Make sure this is the page we want to write into. The current
190 * page is changed concurrently by other writers. [wrh]page are
191 * used as a cache remembering the last page written
192 * to/read/looked up for header address. No synchronization;
193 * could have to find the previous page is a nested write
194 * occured. Finding the right page is done by comparing the
195 * dest_offset with the buf_page offsets.
196 * When at the exact opposite of the buffer, bias towards forward search
197 * because it will be cached.
198 */
199
200 diff_offset = (ssize_t)offset - (ssize_t)page->offset;
201 if (diff_offset <= -(ssize_t)half_buf_size)
202 diff_offset += buf->chan->alloc_size;
203 else if (diff_offset > half_buf_size)
204 diff_offset -= buf->chan->alloc_size;
205
206 if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
207 page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
208 *page_cache = page;
209 } else if (unlikely(diff_offset < 0)) {
210 page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
211 }
212 return page;
213}
214
//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
 * ltt_relay_do_copy - copy "len" bytes from "src" to "dest".
 *
 * Small constant sizes (1, 2, 4, 8) go through fixed-size memcpy() calls,
 * which the compiler lowers to single load/store instructions. This keeps
 * the fast path of the previous open-coded stores, but without the
 * strict-aliasing undefined behavior: the old code fixed aliasing on the
 * source side with a union while still dereferencing "dest" through
 * casted u16/u32/u64 pointers, which is UB (and a potential misaligned
 * access). Any other size falls back to a plain memcpy().
 */
static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
{
	switch (len) {
	case 0:
		break;
	case 1:
		memcpy(dest, src, 1);
		break;
	case 2:
		memcpy(dest, src, 2);
		break;
	case 4:
		memcpy(dest, src, 4);
		break;
//ust// #if (BITS_PER_LONG == 64)
	case 8:
		memcpy(dest, src, 8);
		break;
//ust// #endif
	default:
		memcpy(dest, src, len);
	}
}
242//ust// #else
243//ust// /*
244//ust// * Returns whether the dest and src addresses are aligned on
245//ust// * min(sizeof(void *), len). Call this with statically known len for efficiency.
246//ust// */
247//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
248//ust// {
249//ust// if (ltt_align((size_t)dest, len))
250//ust// return 0;
251//ust// if (ltt_align((size_t)src, len))
252//ust// return 0;
253//ust// return 1;
254//ust// }
255//ust//
256//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
257//ust// {
258//ust// switch (len) {
259//ust// case 0: break;
260//ust// case 1: *(u8 *)dest = *(const u8 *)src;
261//ust// break;
262//ust// case 2: if (unlikely(!addr_aligned(dest, src, 2)))
263//ust// goto memcpy_fallback;
264//ust// *(u16 *)dest = *(const u16 *)src;
265//ust// break;
266//ust// case 4: if (unlikely(!addr_aligned(dest, src, 4)))
267//ust// goto memcpy_fallback;
268//ust// *(u32 *)dest = *(const u32 *)src;
269//ust// break;
270//ust// #if (BITS_PER_LONG == 64)
271//ust// case 8: if (unlikely(!addr_aligned(dest, src, 8)))
272//ust// goto memcpy_fallback;
273//ust// *(u64 *)dest = *(const u64 *)src;
274//ust// break;
275//ust// #endif
276//ust// default:
277//ust// goto memcpy_fallback;
278//ust// }
279//ust// return;
280//ust// memcpy_fallback:
281//ust// memcpy(dest, src, len);
282//ust// }
283//ust// #endif
284
/*
 * ltt_relay_write - copy "len" bytes from "src" into the channel buffer.
 * @buf: channel buffer to write into
 * @offset: write position; reduced to an in-buffer offset via
 *          BUFFER_OFFSET() (declared elsewhere — presumably buffer.h)
 * @src: bytes to copy
 * @len: number of bytes
 *
 * Fast path: copy whatever fits before the end of the flat buffer
 * mapping in one ltt_relay_do_copy(). A wrap-around remainder, if any,
 * is finished by the out-of-line _ltt_relay_write() slow path.
 * Always returns len.
 */
static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
		const void *src, size_t len)
{
//ust//	struct buf_page *page;
//ust//	ssize_t pagecpy;
//ust//
//ust//	offset &= buf->chan->alloc_size - 1;
//ust//	page = buf->wpage;
//ust//
//ust//	page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
//ust//	pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust//	ltt_relay_do_copy(page_address(page->page)
//ust//		+ (offset & ~PAGE_MASK), src, pagecpy);
//ust//
//ust//	if (unlikely(len != pagecpy))
//ust//		_ltt_relay_write(buf, offset, src, len, page, pagecpy);
//ust//	return len;

	size_t cpy;
	size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);

	/* The reduced offset must land inside the allocated buffer. */
	assert(buf_offset < buf->chan->subbuf_size*buf->chan->n_subbufs);

	/* Bytes that fit before the end of the flat mapping. */
	cpy = min_t(size_t, len, buf->buf_size - buf_offset);
	ltt_relay_do_copy(buf->buf_data + buf_offset, src, cpy);

	/* cpy < len means the write wraps: finish in the slow path. */
	if (unlikely(len != cpy))
		_ltt_relay_write(buf, buf_offset, src, len, cpy);
	return len;
}
315
/*
 * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
 */

/* Create a relay channel of n_subbufs sub-buffers, subbuf_size bytes each. */
struct rchan *ltt_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     void *private_data);
/* Tear down a channel created by ltt_relay_open(). */
extern void ltt_relay_close(struct rchan *chan);

/*
 * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
 */
extern const struct file_operations ltt_relay_file_operations;
331
9c67dc50
PMF
332
/* LTTng lockless logging buffer info */
struct ltt_channel_buf_struct {
	/* First 32 bytes cache-hot cacheline */
	local_t offset;			/* Current offset in the buffer */
	local_t *commit_count;		/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Current offset in the buffer
					 * standard atomic access (shared)
					 */
	unsigned long last_tsc;		/*
					 * Last timestamp written in the buffer.
					 */
	/* End of first 32 bytes cacheline */
//ust// #ifdef CONFIG_LTT_VMCORE
//ust//	local_t *commit_seq;		/* Consecutive commits */
//ust// #endif
	atomic_long_t active_readers;	/*
					 * Active readers count
					 * standard atomic access (shared)
					 */
	local_t events_lost;		/* counter of lost events */
	local_t corrupted_subbuffers;	/* counter of corrupted sub-buffers */
//ust//	spinlock_t full_lock;		/*
//ust//					 * buffer full condition spinlock, only
//ust//					 * for userspace tracing blocking mode
//ust//					 * synchronization with reader.
//ust//					 */
//ust//	wait_queue_head_t write_wait;	/*
//ust//					 * Wait queue for blocking user space
//ust//					 * writers
//ust//					 */
//ust//	atomic_t wakeup_readers;	/* Boolean : wakeup readers waiting ? */
	/* one byte is written to this pipe when data is available, in order
	   to wake the consumer */
	/* portability: Single byte writes must be as quick as possible. The kernel-side
	   buffer must be large enough so the writer doesn't block. From the pipe(7)
	   man page: Since linux 2.6.11, the pipe capacity is 65536 bytes. */
	int data_ready_fd_write;
	/* the reading end of the pipe */
	int data_ready_fd_read;

	/* commit count per subbuffer; must be at end of struct */
	/* NOTE(review): [0]-sized trailing array is a GCC extension; a C99
	 * flexible array member ([]) would be the standard spelling, but the
	 * layout/ABI is kept untouched here. */
	local_t commit_seq[0] ____cacheline_aligned;
} ____cacheline_aligned;
377
/* Reserve a sub-buffer for reading; on success *pconsumed_old receives the
 * consumed count to hand back to ltt_do_put_subbuf().
 * NOTE(review): return convention (0/negative) inferred from kernel style —
 * confirm against the implementation. */
int ltt_do_get_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);

/* Release a sub-buffer previously obtained with ltt_do_get_subbuf(). */
int ltt_do_put_subbuf(struct rchan_buf *buf, struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);

/* Register the ust relay transport. */
void init_ustrelay_transport(void);
383
bb07823d 384#endif /* _LINUX_LTT_RELAY_H */
This page took 0.044235 seconds and 4 git commands to generate.