This patch fixes a security issue which allows the root user to read
arbitrary kernel memory. Considering the security model used in LTTng
userspace tooling for kernel tracing, this bug also allows members of
the 'tracing' group to read arbitrary kernel memory.

Calls to __copy_from_user_inatomic() were wrongly enclosed in
set_fs(KERNEL_DS), defeating the access_ok() checks and allowing reads
from kernel memory when a kernel address is provided.
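
For illustration, here is a minimal sketch of the vulnerable pattern
(simplified; not the verbatim LTTng fast path). Raising the thread's
address limit with set_fs(KERNEL_DS) makes access_ok() accept every
address, so a kernel pointer passed as 'src' sails through the check
and gets copied out:

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* address limit now spans kernel memory */
	pagefault_disable();
	/* Under KERNEL_DS this check passes even for kernel addresses. */
	if (lttng_access_ok(VERIFY_READ, src, len))
		ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	set_fs(old_fs);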
Remove all set_fs() calls around __copy_from_user_inatomic().

As a side effect, this will allow us to support v5.10, which is
expected to remove set_fs().
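
A minimal sketch of the corrected pattern (again simplified): with the
default USER_DS address limit left untouched, access_ok() rejects
kernel addresses before any copy takes place, so
__copy_from_user_inatomic() can only ever read user memory:

	pagefault_disable();
	/* With the default address limit, kernel pointers fail this check. */
	if (lttng_access_ok(VERIFY_READ, src, len))
		ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();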
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I35e4562c835217352c012ed96a7b8f93e941381e
 	size_t offset = ctx->buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 	unsigned long ret;
-	mm_segment_t old_fs = get_fs();
 
 	if (unlikely(!len))
 		return;
 
 	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
 	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-	set_fs(KERNEL_DS);
 	pagefault_disable();
 	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
 		goto fill_buffer;
 
 		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
 	}
 	pagefault_enable();
-	set_fs(old_fs);
 	ctx->buf_offset += len;
 	return;
 
 fill_buffer:
 	pagefault_enable();
-	set_fs(old_fs);
 	/*
 	 * In the error path we call the slow path version to avoid
 	 * the pollution of static inline code.
 	size_t index, pagecpy;
 	size_t offset = ctx->buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
-	mm_segment_t old_fs = get_fs();
 
 	if (unlikely(!len))
 		return;
 
 	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
 	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
-	set_fs(KERNEL_DS);
 	pagefault_disable();
 	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
 		goto fill_buffer;
 
 				len, 0, pad);
 	}
 	pagefault_enable();
-	set_fs(old_fs);
 	ctx->buf_offset += len;
 	return;
 
 fill_buffer:
 	pagefault_enable();
-	set_fs(old_fs);
 	/*
 	 * In the error path we call the slow path version to avoid
 	 * the pollution of static inline code.
 		unsigned long len)
 {
 	unsigned long ret;
 
 	if (!lttng_access_ok(VERIFY_READ, src, len))
 		return 1;
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
 	pagefault_disable();
 	ret = __copy_from_user_inatomic(dest, src, len);
 	pagefault_enable();
 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
 {
 	bool has_user = false;
 	int result;
 	struct estack_entry *pattern_reg;
 	struct estack_entry *candidate_reg;
 
+	/* Disable the page fault handler when reading from userspace. */
 	if (estack_bx(stack, top)->u.s.user
 			|| estack_ax(stack, top)->u.s.user) {
 		has_user = true;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
 
 	/* Perform the match operation. */
 	result = !strutils_star_glob_match_char_cb(get_char_at_cb,
 			pattern_reg, get_char_at_cb, candidate_reg);
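
After this change, the guard presumably reduces to the following (a
sketch pieced together from the hunk above, not the verbatim file; the
pagefault_disable() placement is an assumption): only the page fault
handler is toggled when either operand lives in userspace, and the
address limit is never touched.

	/* Disable the page fault handler when reading from userspace. */
	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = true;
		pagefault_disable();	/* assumed: replaces the set_fs() pair */
	}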
 {
 	size_t offset_bx = 0, offset_ax = 0;
 	int diff, has_user = 0;
 
 	if (estack_bx(stack, top)->u.s.user
 			|| estack_ax(stack, top)->u.s.user) {
 		has_user = 1;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
 
 		offset_bx++;
 		offset_ax++;
 	}
 long lttng_strlen_user_inatomic(const char *addr)
 {
 	long count = 0;
 
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
 	pagefault_disable();
 	for (;;) {
 		char v;
 
 		addr++;
 	}
 	pagefault_enable();
-	set_fs(old_fs);
 	return count;
 }
 EXPORT_SYMBOL_GPL(lttng_strlen_user_inatomic);
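
For reference, the surviving context suggests the function walks the
user string one byte at a time under pagefault_disable(). A rough
sketch of the post-fix loop (the byte-wise fetch and the termination
conditions are assumptions, not quoted from the patch):

	pagefault_disable();
	for (;;) {
		char v;

		/* Assumed body: validate and fetch one byte at a time. */
		if (!lttng_access_ok(VERIFY_READ,
				(const char __user *) addr, sizeof(v)))
			break;
		if (__copy_from_user_inatomic(&v,
				(const char __user *) addr, sizeof(v)))
			break;
		count++;	/* assumed: length includes the final NUL */
		if (!v)
			break;
		addr++;
	}
	pagefault_enable();
	return count;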