/*
 * linux/include/linux/ltt-relay.h
 *
 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * CONFIG_RELAY definitions and declarations
 */

#ifndef _LINUX_LTT_RELAY_H
#define _LINUX_LTT_RELAY_H

//ust// #include <linux/types.h>
//ust// #include <linux/sched.h>
//ust// #include <linux/timer.h>
//ust// #include <linux/wait.h>
//ust// #include <linux/list.h>
//ust// #include <linux/fs.h>
//ust// #include <linux/poll.h>
//ust// #include <linux/kref.h>
//ust// #include <linux/mm.h>
//ust// #include <linux/ltt-core.h>
#include <assert.h>
#include "kref.h"
//#include "list.h"
#include "channels.h"
#include "buffer.h"

/* Needs a _much_ better name... */
#define FIX_SIZE(x) ((((x) - 1) & PAGE_MASK) + PAGE_SIZE)
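/*
 * Worked example (illustrative, assuming the usual PAGE_SIZE/PAGE_MASK
 * definitions with PAGE_SIZE == 4096): FIX_SIZE(1) == 4096,
 * FIX_SIZE(4096) == 4096 and FIX_SIZE(4097) == 8192, i.e. the size is
 * rounded up to a whole number of pages.
 */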

/*
 * Tracks changes to rchan/rchan_buf structs
 */
#define LTT_RELAY_CHANNEL_VERSION 8

struct rchan_buf;

struct buf_page {
        struct page *page;
        struct rchan_buf *buf;          /* buffer the page belongs to */
        size_t offset;                  /* page offset in the buffer */
        struct list_head list;          /* buffer linked list */
};

/*
 * Per-cpu relay channel buffer
 */
struct rchan_buf {
        struct rchan *chan;             /* associated channel */
//ust// wait_queue_head_t read_wait;    /* reader wait queue */
//ust// struct timer_list timer;        /* reader wake-up timer */
//ust// struct dentry *dentry;          /* channel file dentry */
        struct kref kref;               /* channel buffer refcount */
//ust// struct list_head pages;         /* list of buffer pages */
        void *buf_data;                 //ust//
        size_t buf_size;
//ust// struct buf_page *wpage;         /* current write page (cache) */
//ust// struct buf_page *hpage[2];      /* current subbuf header page (cache) */
//ust// struct buf_page *rpage;         /* current subbuf read page (cache) */
//ust// unsigned int page_count;        /* number of current buffer pages */
        unsigned int finalized;         /* buffer has been finalized */
//ust// unsigned int cpu;               /* this buf's cpu */
        int shmid;                      /* the shmid of the buffer data pages */
} ____cacheline_aligned;

/*
 * Relay channel data structure
 */
struct rchan {
        u32 version;                    /* the version of this struct */
        size_t subbuf_size;             /* sub-buffer size */
        size_t n_subbufs;               /* number of sub-buffers per buffer */
        size_t alloc_size;              /* total buffer size allocated */
        struct rchan_callbacks *cb;     /* client callbacks */
        struct kref kref;               /* channel refcount */
        void *private_data;             /* for user-defined data */
//ust// struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
        struct rchan_buf *buf;
        struct list_head list;          /* for channel list */
        struct dentry *parent;          /* parent dentry passed to open */
        int subbuf_size_order;          /* order of sub-buffer size */
//ust// char base_filename[NAME_MAX];   /* saved base filename */
};

/*
 * Relay channel client callbacks
 */
struct rchan_callbacks {
        /*
         * subbuf_start - called on buffer-switch to a new sub-buffer
         * @buf: the channel buffer containing the new sub-buffer
         * @subbuf: the start of the new sub-buffer
         * @prev_subbuf: the start of the previous sub-buffer
         * @prev_padding: unused space at the end of previous sub-buffer
         *
         * The client should return 1 to continue logging, 0 to stop
         * logging.
         *
         * NOTE: subbuf_start will also be invoked when the buffer is
         * created, so that the first sub-buffer can be initialized
         * if necessary. In this case, prev_subbuf will be NULL.
         *
         * NOTE: the client can reserve bytes at the beginning of the new
         * sub-buffer by calling subbuf_start_reserve() in this callback.
         */
        int (*subbuf_start) (struct rchan_buf *buf,
                             void *subbuf,
                             void *prev_subbuf,
                             size_t prev_padding);

        /*
         * create_buf_file - create file to represent a relay channel buffer
         * @filename: the name of the file to create
         * @parent: the parent of the file to create
         * @mode: the mode of the file to create
         * @buf: the channel buffer
         *
         * Called during relay_open(), once for each per-cpu buffer,
         * to allow the client to create a file to be used to
         * represent the corresponding channel buffer. If the file is
         * created outside of relay, the parent must also exist in
         * that filesystem.
         *
         * The callback should return the dentry of the file created
         * to represent the relay buffer.
         *
         * Setting the is_global outparam to a non-zero value will
         * cause relay_open() to create a single global buffer rather
         * than the default set of per-cpu buffers.
         *
         * See Documentation/filesystems/relayfs.txt for more info.
         */
        struct dentry *(*create_buf_file)(const char *filename,
                                          struct dentry *parent,
                                          int mode,
                                          struct rchan_buf *buf);

        /*
         * remove_buf_file - remove file representing a relay channel buffer
         * @dentry: the dentry of the file to remove
         *
         * Called during relay_close(), once for each per-cpu buffer,
         * to allow the client to remove a file used to represent a
         * channel buffer.
         *
         * The callback should return 0 if successful, negative if not.
         */
//ust// int (*remove_buf_file)(struct rchan_buf *buf);
};
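
/*
 * Usage sketch (illustrative; the names "my_subbuf_start" and "my_callbacks"
 * are hypothetical, not part of this API): a minimal client that never stops
 * logging could provide callbacks along these lines.
 *
 *	static int my_subbuf_start(struct rchan_buf *buf, void *subbuf,
 *			void *prev_subbuf, size_t prev_padding)
 *	{
 *		return 1;	// always continue logging
 *	}
 *
 *	static struct rchan_callbacks my_callbacks = {
 *		.subbuf_start = my_subbuf_start,
 *	};
 */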

extern struct buf_page *ltt_relay_find_prev_page(struct rchan_buf *buf,
        struct buf_page *page, size_t offset, ssize_t diff_offset);

extern struct buf_page *ltt_relay_find_next_page(struct rchan_buf *buf,
        struct buf_page *page, size_t offset, ssize_t diff_offset);

extern void _ltt_relay_write(struct rchan_buf *buf, size_t offset,
        const void *src, size_t len, ssize_t cpy);

extern int ltt_relay_read(struct rchan_buf *buf, size_t offset,
        void *dest, size_t len);

extern struct buf_page *ltt_relay_read_get_page(struct rchan_buf *buf,
        size_t offset);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *ltt_relay_offset_address(struct rchan_buf *buf,
        size_t offset);

/*
 * Find the page containing "offset". Cache it if it is after the currently
 * cached page.
 */
static inline struct buf_page *ltt_relay_cache_page(struct rchan_buf *buf,
                struct buf_page **page_cache,
                struct buf_page *page, size_t offset)
{
        ssize_t diff_offset;
        ssize_t half_buf_size = buf->chan->alloc_size >> 1;

        /*
         * Make sure this is the page we want to write into. The current
         * page is changed concurrently by other writers. [wrh]page are
         * used as a cache remembering the last page written to, read from
         * or looked up for the header address. No synchronization; we may
         * have to find the previous page if a nested write occurred.
         * Finding the right page is done by comparing the dest_offset with
         * the buf_page offsets.
         * When at the exact opposite of the buffer, bias towards forward
         * search because it will be cached.
         */
        diff_offset = (ssize_t)offset - (ssize_t)page->offset;
        if (diff_offset <= -(ssize_t)half_buf_size)
                diff_offset += buf->chan->alloc_size;
        else if (diff_offset > half_buf_size)
                diff_offset -= buf->chan->alloc_size;

        if (unlikely(diff_offset >= (ssize_t)PAGE_SIZE)) {
                page = ltt_relay_find_next_page(buf, page, offset, diff_offset);
                *page_cache = page;
        } else if (unlikely(diff_offset < 0)) {
                page = ltt_relay_find_prev_page(buf, page, offset, diff_offset);
        }
        return page;
}

//ust// #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
{
        switch (len) {
        case 0: break;
        case 1: *(u8 *)dest = *(const u8 *)src;
                break;
        case 2: *(u16 *)dest = *(const u16 *)src;
                break;
        case 4: *(u32 *)dest = *(const u32 *)src;
                break;
//ust// #if (BITS_PER_LONG == 64)
        case 8: *(u64 *)dest = *(const u64 *)src;
                break;
//ust// #endif
        default:
                memcpy(dest, src, len);
        }
}
//ust// #else
//ust// /*
//ust//  * Returns whether the dest and src addresses are aligned on
//ust//  * min(sizeof(void *), len). Call this with statically known len for efficiency.
//ust//  */
//ust// static inline int addr_aligned(const void *dest, const void *src, size_t len)
//ust// {
//ust//         if (ltt_align((size_t)dest, len))
//ust//                 return 0;
//ust//         if (ltt_align((size_t)src, len))
//ust//                 return 0;
//ust//         return 1;
//ust// }
//ust//
//ust// static inline void ltt_relay_do_copy(void *dest, const void *src, size_t len)
//ust// {
//ust//         switch (len) {
//ust//         case 0: break;
//ust//         case 1: *(u8 *)dest = *(const u8 *)src;
//ust//                 break;
//ust//         case 2: if (unlikely(!addr_aligned(dest, src, 2)))
//ust//                         goto memcpy_fallback;
//ust//                 *(u16 *)dest = *(const u16 *)src;
//ust//                 break;
//ust//         case 4: if (unlikely(!addr_aligned(dest, src, 4)))
//ust//                         goto memcpy_fallback;
//ust//                 *(u32 *)dest = *(const u32 *)src;
//ust//                 break;
//ust// #if (BITS_PER_LONG == 64)
//ust//         case 8: if (unlikely(!addr_aligned(dest, src, 8)))
//ust//                         goto memcpy_fallback;
//ust//                 *(u64 *)dest = *(const u64 *)src;
//ust//                 break;
//ust// #endif
//ust//         default:
//ust//                 goto memcpy_fallback;
//ust//         }
//ust//         return;
//ust// memcpy_fallback:
//ust//         memcpy(dest, src, len);
//ust// }
//ust// #endif

static inline int ltt_relay_write(struct rchan_buf *buf, size_t offset,
                const void *src, size_t len)
{
//ust// struct buf_page *page;
//ust// ssize_t pagecpy;
//ust//
//ust// offset &= buf->chan->alloc_size - 1;
//ust// page = buf->wpage;
//ust//
//ust// page = ltt_relay_cache_page(buf, &buf->wpage, page, offset);
//ust// pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
//ust// ltt_relay_do_copy(page_address(page->page)
//ust//         + (offset & ~PAGE_MASK), src, pagecpy);
//ust//
//ust// if (unlikely(len != pagecpy))
//ust//         _ltt_relay_write(buf, offset, src, len, page, pagecpy);
//ust// return len;

        size_t cpy;
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);

        assert(buf_offset < buf->chan->subbuf_size * buf->chan->n_subbufs);

        cpy = min_t(size_t, len, buf->buf_size - buf_offset);
        ltt_relay_do_copy(buf->buf_data + buf_offset, src, cpy);

        if (unlikely(len != cpy))
                _ltt_relay_write(buf, buf_offset, src, len, cpy);
        return len;
}
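
/*
 * Usage sketch (illustrative; "write_offset" and "struct my_event" are
 * hypothetical and assumed to come from a prior space reservation):
 *
 *	struct my_event ev = { ... };
 *
 *	ltt_relay_write(buf, write_offset, &ev, sizeof(ev));
 *
 * The slow path _ltt_relay_write() is only taken when the copy would run
 * past the end of the flat buffer.
 */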

/*
 * CONFIG_LTT_RELAY kernel API, ltt/ltt-relay-alloc.c
 */

struct rchan *ltt_relay_open(const char *base_filename,
                             struct dentry *parent,
                             size_t subbuf_size,
                             size_t n_subbufs,
                             void *private_data);
extern void ltt_relay_close(struct rchan *chan);
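
/*
 * Usage sketch (illustrative; the channel name, sub-buffer geometry and the
 * "chan" variable are arbitrary): open a channel of 8 sub-buffers of 4096
 * bytes each, then tear it down when tracing stops.
 *
 *	struct rchan *chan;
 *
 *	chan = ltt_relay_open("channel0", NULL, 4096, 8, NULL);
 *	if (chan == NULL)
 *		return -1;
 *	...
 *	ltt_relay_close(chan);
 */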

/*
 * exported ltt_relay file operations, ltt/ltt-relay-alloc.c
 */
extern const struct file_operations ltt_relay_file_operations;

/* LTTng lockless logging buffer info */
struct ltt_channel_buf_struct {
        /* First 32 bytes cache-hot cacheline */
        local_t offset;                 /* Current offset in the buffer */
        local_t *commit_count;          /* Commit count per sub-buffer */
        atomic_long_t consumed;         /*
                                         * Current offset in the buffer
                                         * standard atomic access (shared)
                                         */
        unsigned long last_tsc;         /*
                                         * Last timestamp written in the buffer.
                                         */
        /* End of first 32 bytes cacheline */
//ust// #ifdef CONFIG_LTT_VMCORE
//ust// local_t *commit_seq;            /* Consecutive commits */
//ust// #endif
        atomic_long_t active_readers;   /*
                                         * Active readers count
                                         * standard atomic access (shared)
                                         */
        local_t events_lost;
        local_t corrupted_subbuffers;
//ust// spinlock_t full_lock;           /*
//ust//                                  * buffer full condition spinlock, only
//ust//                                  * for userspace tracing blocking mode
//ust//                                  * synchronization with reader.
//ust//                                  */
//ust// wait_queue_head_t write_wait;   /*
//ust//                                  * Wait queue for blocking user space
//ust//                                  * writers
//ust//                                  */
//ust// atomic_t wakeup_readers;        /* Boolean : wakeup readers waiting ? */
        /*
         * One byte is written to this pipe when data is available, in order
         * to wake the consumer.
         * Portability: single-byte writes must be as quick as possible. The
         * kernel-side buffer must be large enough so the writer doesn't
         * block. From the pipe(7) man page: since Linux 2.6.11, the pipe
         * capacity is 65536 bytes.
         */
        int data_ready_fd_write;
        /* the reading end of the pipe */
        int data_ready_fd_read;

        /* commit count per subbuffer; must be at end of struct */
        local_t commit_seq[0] ____cacheline_aligned;
} ____cacheline_aligned;

int ltt_do_get_subbuf(struct rchan_buf *buf,
                struct ltt_channel_buf_struct *ltt_buf, long *pconsumed_old);

int ltt_do_put_subbuf(struct rchan_buf *buf,
                struct ltt_channel_buf_struct *ltt_buf, u32 uconsumed_old);

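/*
 * Usage sketch (illustrative; error handling trimmed, "ltt_buf" is assumed to
 * be the ltt_channel_buf_struct paired with "buf", and a zero return from
 * ltt_do_get_subbuf() is assumed to mean a sub-buffer is ready): a consumer
 * waits on the data_ready pipe, takes ownership of a sub-buffer, reads it,
 * then hands it back.
 *
 *	long consumed_old;
 *	char dummy;
 *
 *	read(ltt_buf->data_ready_fd_read, &dummy, 1);
 *	if (ltt_do_get_subbuf(buf, ltt_buf, &consumed_old) == 0) {
 *		... read the sub-buffer starting at consumed_old ...
 *		ltt_do_put_subbuf(buf, ltt_buf, consumed_old);
 *	}
 */
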
#endif /* _LINUX_LTT_RELAY_H */