serialize string input robustness
author		Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Sun, 15 Aug 2010 23:11:37 +0000 (19:11 -0400)
committer	Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
Mon, 16 Aug 2010 04:13:19 +0000 (00:13 -0400)
Make sure we handle concurrently modified input strings gracefully.

Adds the ust_buffers_strncpy interface. It fixes up the string if it does not
exactly fill the space allocated, and ensures the string ends with \0.

Also removes the now-unused "_ust_buffers_write".
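
A minimal, self-contained sketch of the copy-then-fixup idea, for illustration
only: the helpers below are hypothetical stand-ins for ust_buffers_do_strncpy()
and _ust_buffers_strncpy_fixup(), operating on a plain char array instead of a
ring-buffer slot, and without the LOAD_SHARED() volatile access used by the
real code.

#include <assert.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy at most len bytes; report how many were copied and whether a '\0'
 * made it into dest. The terminator test is done on dest, because src may
 * be modified concurrently by the traced application.
 */
static size_t do_strncpy_checked(char *dest, const char *src, size_t len,
				 int *terminated)
{
	size_t orig_len = len;

	*terminated = 0;
	for (; len > 0; len--) {
		*dest = *src;	/* real code reads src with LOAD_SHARED() */
		if (*dest == '\0') {
			len--;
			*terminated = 1;
			break;
		}
		dest++;
		src++;
	}
	return orig_len - len;	/* number of bytes copied, '\0' included */
}

/*
 * Repair the reserved slot so it always holds exactly len bytes ending in
 * '\0': pad short copies with 'X', and force a terminator when the copy
 * filled the slot without finding one.
 */
static void fixup_string(char *dest, size_t len, size_t copied, int terminated)
{
	if (copied == len) {
		/* Slot filled without a terminator: chop the last byte. */
		assert(!terminated);
		dest[len - 1] = '\0';
		return;
	}
	/* Short copy: overwrite the copied '\0' and the tail with 'X'. */
	assert(terminated);
	memset(dest + copied - 1, 'X', len - copied + 1);
	dest[len - 1] = '\0';
}

int main(void)
{
	char slot[8];
	int terminated;
	size_t copied;

	/*
	 * The slot was sized for a 6-byte string, but the source "shrank"
	 * to "hi" between the size-computation pass and the copy.
	 */
	copied = do_strncpy_checked(slot, "hi", 6, &terminated);
	if (copied < 6 || !terminated)
		fixup_string(slot, 6, copied, terminated);
	printf("%s\n", slot);	/* prints "hiXXX" */
	return 0;
}

Checking the terminator in the destination rather than the source is what
keeps a concurrent writer from turning a short copy into an unterminated
record; the fixup then guarantees the slot reserved during the size pass is
always fully initialized and '\0'-terminated.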

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
include/ust/probe.h
libust/buffers.c
libust/buffers.h
libust/serialize.c
libust/tracer.h

diff --git a/include/ust/probe.h b/include/ust/probe.h
index 2c0210070d78b322da684007f0bde95963be36ea..7285a2dfc92cd0fd7509f6fae861604020d12178 100644 (file)
@@ -28,7 +28,9 @@ struct ust_buffer;
 
 typedef size_t (*ltt_serialize_cb)(struct ust_buffer *buf, size_t buf_offset,
                         struct ltt_serialize_closure *closure,
-                        void *serialize_private, int *largest_align,
+                        void *serialize_private,
+                       unsigned int stack_pos_ctx,
+                       int *largest_align,
                         const char *fmt, va_list *args);
 
 struct ltt_available_probe {
diff --git a/libust/buffers.c b/libust/buffers.c
index c27b78877df625c33a23af1ff8f6af2f4277ad1b..5d9bb8ec87ab566bdf58fe476e2b8a20f58407de 100644 (file)
@@ -70,28 +70,68 @@ static int get_n_cpus(void)
        return n_cpus;
 }
 
-/* _ust_buffers_write()
+/**
+ * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @len : length to write
+ * @copied: string actually copied
+ * @terminated: does string end with \0
  *
- * @buf: destination buffer
- * @offset: offset in destination
- * @src: source buffer
- * @len: length of source
- * @cpy: already copied
+ * Fills string with "X" if incomplete.
  */
-
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-        const void *src, size_t len, ssize_t cpy)
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+                               size_t len, size_t copied, int terminated)
 {
-       do {
-               len -= cpy;
-               src += cpy;
-               offset += cpy;
+       size_t buf_offset, cpy;
+
+       if (copied == len) {
+               /*
+                * Deal with non-terminated string.
+                */
+               assert(!terminated);
+               offset += copied - 1;
+               buf_offset = BUFFER_OFFSET(offset, buf->chan);
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               assert(buf_offset
+                      < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+               ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
+               return;
+       }
 
-               WARN_ON(offset >= buf->buf_size);
+       /*
+        * Deal with incomplete string.
+        * Overwrite string's \0 with X too.
+        */
+       cpy = copied - 1;
+       assert(terminated);
+       len -= cpy;
+       offset += cpy;
+       buf_offset = BUFFER_OFFSET(offset, buf->chan);
+
+       /*
+        * Underlying layer should never ask for writes across subbuffers.
+        */
+       assert(buf_offset
+              < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
 
-               cpy = min_t(size_t, len, buf->buf_size - offset);
-               ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
-       } while (unlikely(len != cpy));
+       ust_buffers_do_memset(buf->buf_data + buf_offset,
+                             'X', len);
+
+       /*
+        * Overwrite last 'X' with '\0'.
+        */
+       offset += len - 1;
+       buf_offset = BUFFER_OFFSET(offset, buf->chan);
+       /*
+        * Underlying layer should never ask for writes across subbuffers.
+        */
+       assert(buf_offset
+              < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
 }
 
 static int ust_buffers_init_buffer(struct ust_trace *trace,
diff --git a/libust/buffers.h b/libust/buffers.h
index 4fa6262af1715da97ebd69afecfa4084d61566dd..63449d6c024194fca1341769083a55cdfb93ff4b 100644 (file)
@@ -513,22 +513,90 @@ static __inline__ void ltt_commit_slot(
        ltt_write_commit_counter(chan, buf, endidx, buf_offset, commit_count, data_size);
 }
 
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-        const void *src, size_t len, ssize_t cpy);
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+                               size_t len, size_t copied, int terminated);
 
 static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
         const void *src, size_t len)
 {
-       size_t cpy;
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
 
        assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       assert(buf_offset + len < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
 
-       cpy = min_t(size_t, len, buf->buf_size - buf_offset);
-       ust_buffers_do_copy(buf->buf_data + buf_offset, src, cpy);
+       ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);
 
-       if (unlikely(len != cpy))
-               _ust_buffers_write(buf, buf_offset, src, len, cpy);
+       return len;
+}
+
+/*
+ * ust_buffers_do_memset - write character into dest.
+ * @dest: destination
+ * @src: source character
+ * @len: length to write
+ */
+static __inline__
+void ust_buffers_do_memset(void *dest, char src, size_t len)
+{
+       /*
+        * What we really want here is an __inline__ memset, but we
+        * don't have constants, so gcc generally uses a function call.
+        */
+       for (; len > 0; len--)
+               *(u8 *)dest++ = src;
+}
+
+/*
+ * ust_buffers_do_strncpy - copy a string up to a certain number of bytes
+ * @dest: destination
+ * @src: source
+ * @len: max. length to copy
+ * @terminated: output string ends with \0 (output)
+ *
+ * returns the number of bytes copied. Does not finalize with \0 if len is
+ * reached.
+ */
+static __inline__
+size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
+                             int *terminated)
+{
+       size_t orig_len = len;
+
+       *terminated = 0;
+       /*
+        * What we really want here is an __inline__ strncpy, but we
+        * don't have constants, so gcc generally uses a function call.
+        */
+       for (; len > 0; len--) {
+               *(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
+               /* Check with dest, because src may be modified concurrently */
+               if (*(const u8 *)dest == '\0') {
+                       len--;
+                       *terminated = 1;
+                       break;
+               }
+               dest++;
+               src++;
+       }
+       return orig_len - len;
+}
+
+static __inline__
+int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
+                       size_t len)
+{
+       size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
+       ssize_t copied;
+       int terminated;
+
+       assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+       assert(buf_offset + len < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+       copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset,
+                                       src, len, &terminated);
+       if (unlikely(copied < len || !terminated))
+               _ust_buffers_strncpy_fixup(buf, offset, len, copied,
+                                          terminated);
        return len;
 }
 
diff --git a/libust/serialize.c b/libust/serialize.c
index 32df0b96ddbae05c9b4c1765444684418e20980f..95b6e897babeff40b815444710af1646bc77d838 100644 (file)
 #include "usterr.h"
 #include "ust_snprintf.h"
 
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC       4096
+
 enum ltt_type {
        LTT_TYPE_SIGNED_INT,
        LTT_TYPE_UNSIGNED_INT,
@@ -50,6 +56,16 @@ enum ltt_type {
        LTT_TYPE_NONE,
 };
 
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN       (PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
+
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
 /*
@@ -349,7 +365,9 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
                size_t buf_offset,
                char trace_size, enum ltt_type trace_type,
                char c_size, enum ltt_type c_type,
-               int *largest_align, va_list *args)
+               unsigned int *stack_pos_ctx,
+               int *largest_align,
+               va_list *args)
 {
        union {
                unsigned long v_ulong;
@@ -405,10 +423,20 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
                tmp.v_string.s = va_arg(*args, const char *);
                if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
                        tmp.v_string.s = "<NULL>";
-               tmp.v_string.len = strlen(tmp.v_string.s)+1;
+               if (!buf) {
+                       /*
+                        * Reserve tracer stack entry.
+                        */
+                       tracer_stack_pos++;
+                       assert(tracer_stack_pos <= TRACER_STACK_LEN);
+                       barrier();
+                       tracer_stack[*stack_pos_ctx] =
+                                       strlen(tmp.v_string.s) + 1;
+               }
+               tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
                if (buf)
-                       ust_buffers_write(buf, buf_offset, tmp.v_string.s,
-                               tmp.v_string.len);
+                       ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+                                           tmp.v_string.len);
                buf_offset += tmp.v_string.len;
                goto copydone;
        default:
@@ -508,7 +536,9 @@ copydone:
 
 notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        struct ltt_serialize_closure *closure,
-                       void *serialize_private, int *largest_align,
+                       void *serialize_private,
+                       unsigned int stack_pos_ctx,
+                       int *largest_align,
                        const char *fmt, va_list *args)
 {
        char trace_size = 0, c_size = 0;        /*
@@ -548,7 +578,9 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        buf_offset = serialize_trace_data(buf,
                                                buf_offset, trace_size,
                                                trace_type, c_size, c_type,
-                                               largest_align, args);
+                                               &stack_pos_ctx,
+                                               largest_align,
+                                               args);
                        trace_size = 0;
                        c_size = 0;
                        trace_type = LTT_TYPE_NONE;
@@ -566,25 +598,29 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
  * Assume that the padding for alignment starts at a sizeof(void *) address.
  */
 static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
-                               void *serialize_private, int *largest_align,
+                               void *serialize_private,
+                               unsigned int stack_pos_ctx, int *largest_align,
                                const char *fmt, va_list *args)
 {
        ltt_serialize_cb cb = closure->callbacks[0];
        closure->cb_idx = 0;
        return (size_t)cb(NULL, 0, closure, serialize_private,
-                               largest_align, fmt, args);
+                         stack_pos_ctx, largest_align, fmt, args);
 }
 
 static notrace
 void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
                                struct ltt_serialize_closure *closure,
-                               void *serialize_private, int largest_align,
+                               void *serialize_private,
+                               unsigned int stack_pos_ctx,
+                               int largest_align,
                                const char *fmt, va_list *args)
 {
        ltt_serialize_cb cb = closure->callbacks[0];
        closure->cb_idx = 0;
        buf_offset += ltt_align(buf_offset, largest_align);
-       cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+       cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+          fmt, args);
 }
 
 
@@ -609,6 +645,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
        void *serialize_private = NULL;
        int cpu;
        unsigned int rflags;
+       unsigned int stack_pos_ctx;
 
        /*
         * This test is useful for quickly exiting static tracing when no trace
@@ -622,6 +659,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 
        /* Force volatile access. */
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+       stack_pos_ctx = tracer_stack_pos;
        barrier();
 
        pdata = (struct ltt_active_marker *)probe_data;
@@ -642,7 +680,8 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
         */
        largest_align = 1;      /* must be non-zero for ltt_align */
        data_size = ltt_get_data_size(&closure, serialize_private,
-                                       &largest_align, fmt, &args_copy);
+                                     stack_pos_ctx, &largest_align,
+                                     fmt, &args_copy);
        largest_align = min_t(int, largest_align, sizeof(void *));
        va_end(args_copy);
 
@@ -698,8 +737,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
                buf_offset = ltt_write_event_header(channel, buf, buf_offset,
                                        eID, data_size, tsc, rflags);
                ltt_write_event_data(buf, buf_offset, &closure,
-                                       serialize_private,
-                                       largest_align, fmt, &args_copy);
+                                    serialize_private,
+                                    stack_pos_ctx, largest_align,
+                                    fmt, &args_copy);
                va_end(args_copy);
                /* Out-of-order commit */
                ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
@@ -707,6 +747,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
        }
 
        barrier();
+       tracer_stack_pos = stack_pos_ctx;
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
 
        rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
diff --git a/libust/tracer.h b/libust/tracer.h
index e61348250ff6de9d685fd77bc8dde8182b46e4fd..c5df6ece9c33a49751596c39134f193a5861a500 100644 (file)
@@ -65,7 +65,8 @@ struct ltt_serialize_closure {
 extern size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                        struct ltt_serialize_closure *closure,
                        void *serialize_private,
-                       int *largest_align, const char *fmt, va_list *args);
+                       unsigned int stack_pos_ctx, int *largest_align,
+                       const char *fmt, va_list *args);
 
 struct ltt_probe_private_data {
        struct ust_trace *trace;        /*