return 0;
}
-/* print_event_logging_function_user
+/* print_event_logging_function_user_generic
* Print the logging function of an event for userspace tracing. This is the
* core of genevent */
-int print_event_logging_function_user(char *basename, facility_t *fac,
+int print_event_logging_function_user_generic(char *basename, facility_t *fac,
event_t *event, FILE *fd)
{
char *attrib;
+
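+ /* The generic version of the probe is compiled out when LTT_TRACE_FAST is defined. */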
+ fprintf(fd, "#ifndef LTT_TRACE_FAST\n");
+
if(event->no_instrument_function) {
attrib = "__attribute__((no_instrument_function)) ";
} else {
fprintf(fd, "}\n");
fprintf(fd,
"#endif //LTT_TRACE\n");
+ fprintf(fd, "#endif //!LTT_TRACE_FAST\n\n");
return 0;
}
+/* print_event_logging_function_user_fast
+ * Print the logging function of an event for fast userspace tracing. This is
+ * the core of genevent */
+int print_event_logging_function_user_fast(char *basename, facility_t *fac,
+ event_t *event, FILE *fd)
+{
+ char *attrib;
+
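+ /* The fast version of the probe is only compiled when LTT_TRACE_FAST is defined. */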
+ fprintf(fd, "#ifdef LTT_TRACE_FAST\n");
+
+ if(event->no_instrument_function) {
+ attrib = "__attribute__((no_instrument_function)) ";
+ } else {
+ attrib = "";
+ }
+ fprintf(fd, "static inline %sint trace_%s(\n",attrib, basename);
+
+ int has_argument = 0;
+ int has_type_fixed = 0;
+
+ for(unsigned int j = 0; j < event->fields.position; j++) {
+ /* For each field, print the function argument */
+ field_t *f = (field_t*)event->fields.array[j];
+ type_descriptor_t *t = f->type;
+ if(has_argument) {
+ fprintf(fd, ",");
+ fprintf(fd, "\n");
+ }
+ if(print_arg(t, fd, 2, basename, f->name)) return 1;
+ has_argument = 1;
+ }
+ if(!has_argument) {
+ print_tabs(2, fd);
+ fprintf(fd, "void");
+ }
+ fprintf(fd,")\n");
+ fprintf(fd,
+ "#ifndef LTT_TRACE\n");
+ fprintf(fd, "{\n");
+ fprintf(fd, "}\n");
+ fprintf(fd,"#else\n");
+ fprintf(fd, "{\n");
+ /* Print the function variables */
+ print_tabs(1, fd);
+ fprintf(fd, "unsigned int index;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "struct ltt_trace_info *trace = thread_trace_info;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "struct ltt_buf *ltt_buf;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "void *buffer = NULL;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t *to_base = &real_to_base;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t real_to = 0;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t *to = &real_to;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t real_len = 0;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t *len = &real_len;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t reserve_size;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t slot_size;\n");
+ print_tabs(1, fd);
+
+ if(event->fields.position > 0) {
+ for(unsigned int i=0;i<event->fields.position;i++){
+ /* Search for at least one child with fixed size. It means
+ * we need local variables.*/
+ field_t *field = (field_t*)(event->fields.array[i]);
+ type_descriptor_t *type = field->type;
+ has_type_fixed = has_type_local(type);
+ if(has_type_fixed) break;
+ }
+
+ if(has_type_fixed) {
+ fprintf(fd, "size_t align;\n");
+ print_tabs(1, fd);
+ }
+
+ fprintf(fd, "const void *real_from;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "const void **from = &real_from;\n");
+ print_tabs(1, fd);
+ }
+ fprintf(fd, "uint64_t tsc;\n");
+ print_tabs(1, fd);
+ fprintf(fd, "size_t before_hdr_pad, after_hdr_pad, header_size;\n");
+ fprintf(fd, "\n");
+
+ print_tabs(1, fd);
+ fprintf(fd, "if(!trace) ltt_thread_init();\n");
+ fprintf(fd, "\n");
+
+ /* Calculate event variable len + event data alignment offset.
+ * Assume that the padding for alignment starts at a void*
+ * address.
+ * This excludes the header size and alignment. */
+
+ print_tabs(1, fd);
+ fprintf(fd, "/* For each field, calculate the field size. */\n");
+ print_tabs(1, fd);
+ fprintf(fd, "/* size = *to_base + *to + *len */\n");
+ print_tabs(1, fd);
+ fprintf(fd, "/* Assume that the padding for alignment starts at a\n");
+ print_tabs(1, fd);
+ fprintf(fd, " * sizeof(void *) address. */\n");
+ fprintf(fd, "\n");
+
+ for(unsigned int i=0;i<event->fields.position;i++){
+ field_t *field = (field_t*)(event->fields.array[i]);
+ type_descriptor_t *type = field->type;
+ /* Set from */
+ print_tabs(1, fd);
+ switch(type->type) {
+ case SEQUENCE:
+ case UNION:
+ case ARRAY:
+ case STRUCT:
+ case STRING:
+ fprintf(fd, "*from = lttng_param_%s;\n", field->name);
+ break;
+ default:
+ fprintf(fd, "*from = &lttng_param_%s;\n", field->name);
+ break;
+ }
+
+ if(print_type_write(type,
+ fd, 1, basename, field->name, "lttng_param_", 0)) return 1;
+ fprintf(fd, "\n");
+ }
+ print_tabs(1, fd);
+ fprintf(fd, "reserve_size = *to_base + *to + *len;\n");
+
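+ /* Bracket the event write with trace->nesting++ / trace->nesting-- (see the end label below). */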
+ print_tabs(1, fd);
+ fprintf(fd, "trace->nesting++;\n");
+
+ /* Get facility index */
+
+ print_tabs(1, fd);
+ fprintf(fd,
+ "index = ltt_get_index_from_facility(ltt_facility_%s_%X,\n"\
+ "\t\t\t\t\t\tevent_%s_%s);\n",
+ fac->name, fac->checksum, fac->name, event->name);
+ fprintf(fd,"\n");
+
+
+ print_tabs(1, fd);
+ fprintf(fd, "{\n");
+
+ if(event->per_trace) {
+ print_tabs(2, fd);
+ fprintf(fd, "if(dest_trace != trace) continue;\n\n");
+ }
+
+ print_tabs(2, fd);
+ fprintf(fd, "ltt_buf = ltt_get_channel_from_index(trace, index);\n");
+ print_tabs(2, fd);
+
+
+ /* Relay reserve */
+ /* If error, increment event lost counter (done by ltt_reserve_slot) and
+ * return */
+ print_tabs(2, fd);
+ fprintf(fd, "slot_size = 0;\n");
+ print_tabs(2, fd);
+ fprintf(fd, "buffer = ltt_reserve_slot(trace, ltt_buf,\n");
+ print_tabs(3, fd);
+ fprintf(fd, "reserve_size, &slot_size, &tsc,\n");
+ print_tabs(3, fd);
+ fprintf(fd, "&before_hdr_pad, &after_hdr_pad, &header_size);\n");
+ /* If error, return */
+ print_tabs(2, fd);
+ fprintf(fd, "if(!buffer) goto end; /* buffer full */\n\n");
+ //print_tabs(2, fd);
+ // for DEBUG only
+ // fprintf(fd, "goto commit; /* DEBUG : never actually write. */\n\n");
+ print_tabs(2, fd);
+ fprintf(fd, "*to_base = *to = *len = 0;\n");
+ fprintf(fd, "\n");
+
+ /* Write event header */
+ print_tabs(2, fd);
+ fprintf(fd, "ltt_write_event_header(trace, ltt_buf, buffer,\n");
+ print_tabs(3, fd);
+ fprintf(fd, "ltt_facility_%s_%X, event_%s_%s,\n", fac->name, fac->checksum,
+ fac->name, event->name);
+ print_tabs(3, fd);
+ fprintf(fd, "reserve_size, before_hdr_pad, tsc);\n");
+ print_tabs(2, fd);
+ fprintf(fd, "*to_base += before_hdr_pad + after_hdr_pad + header_size;\n");
+ fprintf(fd, "\n");
+
+ /* write data. */
+
+ for(unsigned int i=0;i<event->fields.position;i++){
+ field_t *field = (field_t*)(event->fields.array[i]);
+ type_descriptor_t *type = field->type;
+
+ /* Set from */
+ print_tabs(2, fd);
+ switch(type->type) {
+ case SEQUENCE:
+ case UNION:
+ case ARRAY:
+ case STRUCT:
+ case STRING:
+ fprintf(fd, "*from = lttng_param_%s;\n", field->name);
+ break;
+ default:
+ fprintf(fd, "*from = &lttng_param_%s;\n", field->name);
+ break;
+ }
+
+
+ if(print_type_write(type,
+ fd, 2, basename, field->name, "lttng_param_", 0)) return 1;
+ fprintf(fd, "\n");
+
+ /* Don't forget to flush pending memcpy */
+ print_tabs(2, fd);
+ fprintf(fd, "/* Flush pending memcpy */\n");
+ print_tabs(2, fd);
+ fprintf(fd, "if(*len != 0) {\n");
+ print_tabs(3, fd);
+ fprintf(fd, "memcpy(buffer+*to_base+*to, *from, *len);\n");
+ print_tabs(3, fd);
+ fprintf(fd, "*to += *len;\n");
+ //print_tabs(3, fd);
+ //fprintf(fd, "from += len;\n");
+ print_tabs(3, fd);
+ fprintf(fd, "*len = 0;\n");
+ print_tabs(2, fd);
+ fprintf(fd, "}\n");
+ fprintf(fd, "\n");
+ }
+
+
+ /* commit */
+ // for DEBUG only.
+ //fprintf(fd, "commit:\n"); /* DEBUG! */
+ print_tabs(2, fd);
+ fprintf(fd, "ltt_commit_slot(ltt_buf, buffer, slot_size);\n\n");
+
+ fprintf(fd, "}\n\n");
+
+ fprintf(fd, "end:\n");
+ /* Release locks */
+ print_tabs(1, fd);
+ fprintf(fd, "trace->nesting--;\n");
+
+
+ fprintf(fd, "}\n");
+ fprintf(fd,
+ "#endif //LTT_TRACE\n");
+ fprintf(fd, "#endif //LTT_TRACE_FAST\n");
+
+ return 0;
+}
+
+
+
+
+
+
/* ltt-facility-name.h : main logging header.
* log_header */
if(!fac->user) {
if(print_event_logging_function(basename, fac, event, fd)) return 1;
} else {
- if(print_event_logging_function_user(basename, fac, event, fd)) return 1;
+ if(print_event_logging_function_user_generic(basename, fac, event, fd))
+ return 1;
+ if(print_event_logging_function_user_fast(basename, fac, event, fd))
+ return 1;
}
fprintf(fd, "\n");
*
*/
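+/* Force every function marked inline in this file to really be inlined. */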
+#define inline inline __attribute__((always_inline))
+
#define _GNU_SOURCE
#define LTT_TRACE
#include <sys/types.h>
#include <ltt/ltt-usertrace-fast.h>
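+
+/* Debug messages are only compiled in when LTT_SHOW_DEBUG is defined. */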
+#ifdef LTT_SHOW_DEBUG
+#define dbg_printf(...) printf(__VA_ARGS__)
+#else
+#define dbg_printf(...)
+#endif //LTT_SHOW_DEBUG
+
+
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
/* Writer (the traced application) */
/* signal handling */
static void handler_sigusr1(int signo)
{
- printf("LTT Signal %d received : parent buffer switch.\n", signo);
+ dbg_printf("LTT Signal %d received : parent buffer switch.\n", signo);
}
static void handler_sigusr2(int signo)
{
- printf("LTT Signal %d received : parent exited.\n", signo);
+ dbg_printf("LTT Signal %d received : parent exited.\n", signo);
parent_exited = 1;
}
static void handler_sigalarm(int signo)
{
- printf("LTT Signal %d received\n", signo);
+ dbg_printf("LTT Signal %d received\n", signo);
if(getppid() != traced_pid) {
/* Parent died */
- printf("LTT Parent %lu died, cleaning up\n", traced_pid);
+ dbg_printf("LTT Parent %lu died, cleaning up\n", traced_pid);
traced_pid = 0;
}
alarm(3);
ret = futex((unsigned long)&ltt_buf->full,
FUTEX_WAKE, 1, 0, 0, 0);
if(ret != 1) {
- printf("LTT warning : race condition : writer not waiting or too many writers\n");
+ dbg_printf("LTT warning : race condition : writer not waiting or too many writers\n");
}
atomic_set(&ltt_buf->full, 0);
}
{
unsigned int consumed_old;
int err;
- printf("LTT read buffer\n");
+ dbg_printf("LTT read buffer\n");
err = ltt_buffer_get(ltt_buf, &consumed_old);
if(err != 0) {
- if(err != -EAGAIN) printf("LTT Reserving sub buffer failed\n");
+ if(err != -EAGAIN) dbg_printf("LTT Reserving sub buffer failed\n");
goto get_error;
}
if(err != 0) {
if(err == -EIO) {
- printf("Reader has been pushed by the writer, last subbuffer corrupted.\n");
+ dbg_printf("Reader has been pushed by the writer, last subbuffer corrupted.\n");
/* FIXME : we may delete the last written buffer if we wish. */
}
goto get_error;
traced_pid = l_traced_pid;
traced_tid = l_traced_tid;
- printf("LTT ltt_usertrace_fast_daemon : init is %d, pid is %lu, traced_pid is %lu, traced_tid is %lu\n",
+ dbg_printf("LTT ltt_usertrace_fast_daemon : init is %d, pid is %lu, traced_pid is %lu, traced_tid is %lu\n",
shared_trace_info->init, getpid(), traced_pid, traced_tid);
act.sa_handler = handler_sigusr1;
/* Enable signals */
ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if(ret) {
- printf("LTT Error in pthread_sigmask\n");
+ dbg_printf("LTT Error in pthread_sigmask\n");
}
alarm(3);
pause();
if(traced_pid == 0) break; /* parent died */
if(parent_exited) break;
- printf("LTT Doing a buffer switch read. pid is : %lu\n", getpid());
+ dbg_printf("LTT Doing a buffer switch read. pid is : %lu\n", getpid());
do {
ret = read_subbuffer(&shared_trace_info->channel.cpu, fd_cpu);
/* parent : create the shared memory map */
shared_trace_info = mmap(0, sizeof(*thread_trace_info),
PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, 0, 0);
- memset(shared_trace_info, 0, sizeof(*shared_trace_info));
+ shared_trace_info->init=0;
+ shared_trace_info->filter=0;
+ shared_trace_info->daemon_id=0;
+ shared_trace_info->nesting=0;
+ memset(&shared_trace_info->channel.facilities, 0,
+ sizeof(shared_trace_info->channel.facilities));
+ memset(&shared_trace_info->channel.cpu, 0,
+ sizeof(shared_trace_info->channel.cpu));
/* Tricky semaphore : is in a shared memory space, so it's ok for a fast
* mutex (futex). */
atomic_set(&shared_trace_info->channel.facilities.full, 0);
/* Disable signals */
ret = sigfillset(&set);
if(ret) {
- printf("LTT Error in sigfillset\n");
+ dbg_printf("LTT Error in sigfillset\n");
}
ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
if(ret) {
- printf("LTT Error in pthread_sigmask\n");
+ dbg_printf("LTT Error in pthread_sigmask\n");
}
pid = fork();
/* Enable signals */
ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
if(ret) {
- printf("LTT Error in pthread_sigmask\n");
+ dbg_printf("LTT Error in pthread_sigmask\n");
}
} else if(pid == 0) {
pid_t sid;
/* Child */
role = LTT_ROLE_READER;
sid = setsid();
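+ /* Lower the reader daemon's scheduling priority (nice +1) relative to the traced application. */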
+ ret = nice(1);
+ if(ret < 0) {
+ perror("Error in nice");
+ }
if(sid < 0) {
perror("Error setting sid");
}
void __attribute__((constructor)) __ltt_usertrace_fast_init(void)
{
- printf("LTT usertrace-fast init\n");
+ dbg_printf("LTT usertrace-fast init\n");
ltt_rw_init();
}
void __attribute__((destructor)) __ltt_usertrace_fast_fini(void)
{
if(role == LTT_ROLE_WRITER) {
- printf("LTT usertrace-fast fini\n");
+ dbg_printf("LTT usertrace-fast fini\n");
ltt_usertrace_fast_cleanup(NULL);
}
}
/* Event string logging function */
+#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_string(
const char * lttng_param_data)
#ifndef LTT_TRACE
}
#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_string(
+ const char * lttng_param_data)
+#ifndef LTT_TRACE
+{
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ void *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const void *real_from;
+ const void **from = &real_from;
+ uint64_t tsc;
+ size_t before_hdr_pad, after_hdr_pad, header_size;
+
+ if(!trace) ltt_thread_init();
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
+ event_user_generic_string);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc,
+ &before_hdr_pad, &after_hdr_pad, &header_size);
+ if(!buffer) goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_F583779E, event_user_generic_string,
+ reserve_size, before_hdr_pad, tsc);
+ *to_base += before_hdr_pad + after_hdr_pad + header_size;
+
+ *from = lttng_param_data;
+ lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
/* Event string_pointer structures */
static inline void lttng_write_string_user_generic_string_pointer_string(
/* Event string_pointer logging function */
+#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_string_pointer(
const char * lttng_param_string,
const void * lttng_param_pointer)
}
#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_string_pointer(
+ const char * lttng_param_string,
+ const void * lttng_param_pointer)
+#ifndef LTT_TRACE
+{
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ void *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const void *real_from;
+ const void **from = &real_from;
+ uint64_t tsc;
+ size_t before_hdr_pad, after_hdr_pad, header_size;
+
+ if(!trace) ltt_thread_init();
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ *from = &lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
+ event_user_generic_string_pointer);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc,
+ &before_hdr_pad, &after_hdr_pad, &header_size);
+ if(!buffer) goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_F583779E, event_user_generic_string_pointer,
+ reserve_size, before_hdr_pad, tsc);
+ *to_base += before_hdr_pad + after_hdr_pad + header_size;
+
+ *from = lttng_param_string;
+ lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = &lttng_param_pointer;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
/* Event slow_printf structures */
static inline void lttng_write_string_user_generic_slow_printf_string(
/* Event slow_printf logging function */
+#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_slow_printf_param_buffer(
void *buffer,
size_t reserve_size)
}
#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline int trace_user_generic_slow_printf(
+ const char * lttng_param_string)
+#ifndef LTT_TRACE
+{
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ void *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ const void *real_from;
+ const void **from = &real_from;
+ uint64_t tsc;
+ size_t before_hdr_pad, after_hdr_pad, header_size;
+
+ if(!trace) ltt_thread_init();
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = lttng_param_string;
+ lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
+ event_user_generic_slow_printf);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc,
+ &before_hdr_pad, &after_hdr_pad, &header_size);
+ if(!buffer) goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_F583779E, event_user_generic_slow_printf,
+ reserve_size, before_hdr_pad, tsc);
+ *to_base += before_hdr_pad + after_hdr_pad + header_size;
+
+ *from = lttng_param_string;
+ lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
/* Event function_entry structures */
/* Event function_entry logging function */
+#ifndef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
const void * lttng_param_this_fn,
const void * lttng_param_call_site)
}
#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ void *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const void *real_from;
+ const void **from = &real_from;
+ uint64_t tsc;
+ size_t before_hdr_pad, after_hdr_pad, header_size;
+
+ if(!trace) ltt_thread_init();
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = &lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = &lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
+ event_user_generic_function_entry);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc,
+ &before_hdr_pad, &after_hdr_pad, &header_size);
+ if(!buffer) goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_F583779E, event_user_generic_function_entry,
+ reserve_size, before_hdr_pad, tsc);
+ *to_base += before_hdr_pad + after_hdr_pad + header_size;
+
+ *from = &lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = &lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
/* Event function_exit structures */
/* Event function_exit logging function */
+#ifndef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
const void * lttng_param_this_fn,
const void * lttng_param_call_site)
}
#endif //LTT_TRACE
+#endif //!LTT_TRACE_FAST
+
+#ifdef LTT_TRACE_FAST
+static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
+ const void * lttng_param_this_fn,
+ const void * lttng_param_call_site)
+#ifndef LTT_TRACE
+{
+}
+#else
+{
+ unsigned int index;
+ struct ltt_trace_info *trace = thread_trace_info;
+ struct ltt_buf *ltt_buf;
+ void *buffer = NULL;
+ size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
+ size_t *to_base = &real_to_base;
+ size_t real_to = 0;
+ size_t *to = &real_to;
+ size_t real_len = 0;
+ size_t *len = &real_len;
+ size_t reserve_size;
+ size_t slot_size;
+ size_t align;
+ const void *real_from;
+ const void **from = &real_from;
+ uint64_t tsc;
+ size_t before_hdr_pad, after_hdr_pad, header_size;
+
+ if(!trace) ltt_thread_init();
+
+ /* For each field, calculate the field size. */
+ /* size = *to_base + *to + *len */
+ /* Assume that the padding for alignment starts at a
+ * sizeof(void *) address. */
+
+ *from = &lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ *from = &lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ reserve_size = *to_base + *to + *len;
+ trace->nesting++;
+ index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
+ event_user_generic_function_exit);
+
+ {
+ ltt_buf = ltt_get_channel_from_index(trace, index);
+ slot_size = 0;
+ buffer = ltt_reserve_slot(trace, ltt_buf,
+ reserve_size, &slot_size, &tsc,
+ &before_hdr_pad, &after_hdr_pad, &header_size);
+ if(!buffer) goto end; /* buffer full */
+
+ *to_base = *to = *len = 0;
+
+ ltt_write_event_header(trace, ltt_buf, buffer,
+ ltt_facility_user_generic_F583779E, event_user_generic_function_exit,
+ reserve_size, before_hdr_pad, tsc);
+ *to_base += before_hdr_pad + after_hdr_pad + header_size;
+
+ *from = &lttng_param_this_fn;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ *from = &lttng_param_call_site;
+ align = sizeof(const void *);
+
+ if(*len == 0) {
+ *to += ltt_align(*to, align); /* align output */
+ } else {
+ *len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
+ }
+
+ *len += sizeof(const void *);
+
+ /* Flush pending memcpy */
+ if(*len != 0) {
+ memcpy(buffer+*to_base+*to, *from, *len);
+ *to += *len;
+ *len = 0;
+ }
+
+ ltt_commit_slot(ltt_buf, buffer, slot_size);
+
+}
+
+end:
+ trace->nesting--;
+}
+#endif //LTT_TRACE
+#endif //LTT_TRACE_FAST
#endif //_LTT_FACILITY_USER_GENERIC_H_