| 1 | /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) |
| 2 | * |
| 3 | * wrapper/uaccess.h |
| 4 | * |
| 5 | * wrapper around linux/uaccess.h. |
| 6 | * |
| 7 | * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com> |
| 8 | */ |
| 9 | |
| 10 | #ifndef _LTTNG_WRAPPER_UACCESS_H |
| 11 | #define _LTTNG_WRAPPER_UACCESS_H |
| 12 | |
| 13 | #include <linux/uaccess.h> |
| 14 | #include <wrapper/bitops.h> |
| 15 | #include <lttng/kernel-version.h> |
| 16 | |
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,0,0) || \
	LTTNG_RHEL_KERNEL_RANGE(4,18,0,147,0,0, 4,19,0,0,0,0))

/*
 * Kernel 5.0 dropped the 'type' argument from access_ok() (the RHEL range
 * above covers the backport of that change to RHEL 4.18 kernels).  Keep
 * accepting a type argument in the wrapper and discard it, and provide the
 * removed VERIFY_READ/VERIFY_WRITE constants so callers compile unchanged
 * on both old and new kernels.
 */
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define lttng_access_ok(type, addr, size) access_ok(addr, size)

#else /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,0,0) */

/* Pre-5.0 kernels: access_ok() still takes the type argument directly. */
#define lttng_access_ok(type, addr, size) access_ok(type, addr, size)

#endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,0,0) */
| 29 | |
| 30 | #if LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,4,0) |
| 31 | static __always_inline __must_check int |
| 32 | lttng_copy_struct_from_user(void *dst, size_t ksize, const void __user *src, |
| 33 | size_t usize) |
| 34 | { |
| 35 | return copy_struct_from_user(dst, ksize, src, usize); |
| 36 | } |
| 37 | #else /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,4,0) */ |
| 38 | /** |
| 39 | * lttng_check_zeroed_user: check if a userspace buffer only contains zero bytes |
| 40 | * @from: Source address, in userspace. |
| 41 | * @size: Size of buffer. |
| 42 | * |
| 43 | * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for |
| 44 | * userspace addresses (and is more efficient because we don't care where the |
| 45 | * first non-zero byte is). |
| 46 | * |
| 47 | * Returns: |
| 48 | * * 0: There were non-zero bytes present in the buffer. |
| 49 | * * 1: The buffer was full of zero bytes. |
| 50 | * * -EFAULT: access to userspace failed. |
| 51 | */ |
| 52 | static inline |
| 53 | int lttng_check_zeroed_user(const void __user *from, size_t size) |
| 54 | { |
| 55 | unsigned long val; |
| 56 | uintptr_t align = (uintptr_t) from % sizeof(unsigned long); |
| 57 | int ret; |
| 58 | |
| 59 | if (unlikely(size == 0)) |
| 60 | return 1; |
| 61 | |
| 62 | from -= align; |
| 63 | size += align; |
| 64 | |
| 65 | if (!lttng_access_ok(VERIFY_READ, from, size)) |
| 66 | return -EFAULT; |
| 67 | |
| 68 | ret = get_user(val, (unsigned long __user *) from); |
| 69 | if (ret) |
| 70 | return ret; |
| 71 | if (align) |
| 72 | val &= ~lttng_aligned_byte_mask(align); |
| 73 | |
| 74 | while (size > sizeof(unsigned long)) { |
| 75 | if (unlikely(val)) |
| 76 | goto done; |
| 77 | |
| 78 | from += sizeof(unsigned long); |
| 79 | size -= sizeof(unsigned long); |
| 80 | |
| 81 | ret = get_user(val, (unsigned long __user *) from); |
| 82 | if (ret) |
| 83 | return ret; |
| 84 | } |
| 85 | |
| 86 | if (size < sizeof(unsigned long)) |
| 87 | val &= lttng_aligned_byte_mask(size); |
| 88 | |
| 89 | done: |
| 90 | return (val == 0); |
| 91 | } |
| 92 | |
| 93 | static __always_inline __must_check int |
| 94 | lttng_copy_struct_from_user(void *dst, size_t ksize, const void __user *src, |
| 95 | size_t usize) |
| 96 | { |
| 97 | size_t size = min(ksize, usize); |
| 98 | size_t rest = max(ksize, usize) - size; |
| 99 | |
| 100 | /* Deal with trailing bytes. */ |
| 101 | if (usize < ksize) { |
| 102 | memset(dst + size, 0, rest); |
| 103 | } else if (usize > ksize) { |
| 104 | int ret = lttng_check_zeroed_user(src + size, rest); |
| 105 | if (ret <= 0) |
| 106 | return ret ?: -E2BIG; |
| 107 | } |
| 108 | /* Copy the interoperable parts of the struct. */ |
| 109 | if (copy_from_user(dst, src, size)) |
| 110 | return -EFAULT; |
| 111 | return 0; |
| 112 | } |
| 113 | #endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,4,0) */ |
| 114 | |
| 115 | #endif /* _LTTNG_WRAPPER_UACCESS_H */ |