/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-tp-mempool.c
 *
 * Copyright (C) 2018 Julien Desfossez <jdesfossez@efficios.com>
 */

#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/list.h>

#include <lttng-tp-mempool.h>

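/*
 * Each possible CPU owns a free list of pre-allocated, fixed-size
 * buffers. Tracepoint probes borrow a buffer with
 * lttng_tp_mempool_alloc() and hand it back with
 * lttng_tp_mempool_free(), so no allocation happens in tracing context
 * once lttng_tp_mempool_init() has run.
 */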
struct lttng_tp_buf_entry {
	int cpu;	/* To make sure we return the entry to the right pool. */
	char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
	struct list_head list;
};

/*
 * No exclusive access strategy for now: this memory pool is currently
 * only used from a non-preemptible context, and the interrupt
 * tracepoint probes do not use this facility.
 */
struct per_cpu_buf {
	struct list_head free_list;	/* Free struct lttng_tp_buf_entry. */
};

static struct per_cpu_buf __percpu *pool;	/* Per-cpu buffer. */

int lttng_tp_mempool_init(void)
{
	int ret, cpu;

	/* The pool is only supposed to be allocated once. */
	if (pool) {
		WARN_ON_ONCE(1);
		ret = -1;
		goto end;
	}

	pool = alloc_percpu(struct per_cpu_buf);
	if (!pool) {
		ret = -ENOMEM;
		goto end;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		INIT_LIST_HEAD(&cpu_buf->free_list);
	}

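	/*
	 * Pre-allocate the entries with kzalloc_node() on each CPU's
	 * local NUMA node, so tracing-time accesses stay node-local.
	 */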
	for_each_possible_cpu(cpu) {
		int i;
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
			struct lttng_tp_buf_entry *entry;

			entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!entry) {
				ret = -ENOMEM;
				goto error_free_pool;
			}
			entry->cpu = cpu;
			list_add_tail(&entry->list, &cpu_buf->free_list);
		}
	}

	ret = 0;
	goto end;

error_free_pool:
	lttng_tp_mempool_destroy();
end:
	return ret;
}

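/*
 * Walk every CPU's free list, release the entries, then free the
 * per-cpu pool itself. A free list holding fewer than
 * LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU entries means a buffer was never
 * returned, hence the leak warning.
 */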
void lttng_tp_mempool_destroy(void)
{
	int cpu;

	if (!pool) {
		return;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
		struct lttng_tp_buf_entry *entry, *tmp;
		int i = 0;

		list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
			list_del(&entry->list);
			kfree(entry);
			i++;
		}
		if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
			printk(KERN_WARNING "Leak detected in tp-mempool\n");
		}
	}
	free_percpu(pool);
	pool = NULL;
}

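/*
 * Must be called with preemption disabled: the buffer is taken from
 * the current CPU's free list, and entry->cpu records which pool it
 * must be returned to. Returns NULL if the request is larger than
 * LTTNG_TP_MEMPOOL_BUF_SIZE or if the local free list is empty.
 */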
void *lttng_tp_mempool_alloc(size_t size)
{
	void *ret;
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;
	int cpu = smp_processor_id();

	if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
		ret = NULL;
		goto end;
	}

	cpu_buf = per_cpu_ptr(pool, cpu);
	if (list_empty(&cpu_buf->free_list)) {
		ret = NULL;
		goto end;
	}

	entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
	/* Remove the entry from the free list. */
	list_del(&entry->list);

	memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);

	ret = (void *) entry->buf;

end:
	return ret;
}

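/*
 * Return a buffer obtained from lttng_tp_mempool_alloc() to the free
 * list of the CPU it was allocated on. Passing NULL is a no-op.
 */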
void lttng_tp_mempool_free(void *ptr)
{
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;

	if (!ptr)
		goto end;
	entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
	cpu_buf = per_cpu_ptr(pool, entry->cpu);
	if (!cpu_buf)
		goto end;
	/* Add it to the free list. */
	list_add_tail(&entry->list, &cpu_buf->free_list);

end:
	return;
}
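
/*
 * Usage sketch (not part of the original file): how a tracepoint
 * probe, which runs with preemption disabled, might borrow and return
 * a buffer. The probe name, payload struct, and fill_payload() helper
 * are hypothetical, for illustration only; kept under #if 0 so the
 * file still compiles unchanged.
 */
#if 0
static void example_probe(void)
{
	struct example_payload *payload;

	payload = lttng_tp_mempool_alloc(sizeof(*payload));
	if (!payload)
		return;	/* Pool exhausted or request too large. */
	fill_payload(payload);
	/* ... record the payload into the trace buffer ... */
	lttng_tp_mempool_free(payload);
}
#endif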