/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-tracker-pid.c
 *
 * LTTng Process ID tracking.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/stringify.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <wrapper/tracepoint.h>
#include <wrapper/rcu.h>
#include <wrapper/list.h>
#include <lttng-events.h>

/*
 * The hash table is allocated and freed when there are no possible
 * concurrent lookups (ensured by the alloc/free caller). However,
 * there can be concurrent RCU lookups vs add/del operations.
 *
 * Concurrent updates of the PID hash table are forbidden: the caller
 * must ensure mutual exclusion. This is currently done by holding the
 * sessions_mutex across calls to the create, destroy, add, and del
 * functions of this API.
 */
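
/*
 * Illustrative usage sketch, not part of this file: updates are
 * expected to be serialized by the caller, e.g. under sessions_mutex
 * as noted above. Where the tracker pointer is stored (for instance in
 * a session object) and the surrounding error handling are assumptions
 * made for the example only.
 *
 *	mutex_lock(&sessions_mutex);
 *	tracker = lttng_pid_tracker_create();
 *	if (!tracker)
 *		goto alloc_error;
 *	ret = lttng_pid_tracker_add(tracker, pid);	(-EEXIST if already tracked)
 *	...
 *	ret = lttng_pid_tracker_del(tracker, pid);	(-ENOENT if not tracked)
 *	lttng_pid_tracker_destroy(tracker);
 *	mutex_unlock(&sessions_mutex);
 */
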
int lttng_pid_tracker_get_node_pid(const struct lttng_pid_hash_node *node)
{
	return node->pid;
}

/*
 * Lookup is performed from an RCU read-side critical section (RCU sched),
 * protected by preemption being disabled at the tracepoint call site.
 * Return 1 if found, 0 if not found.
 */
bool lttng_pid_tracker_lookup(struct lttng_pid_tracker *lpf, int pid)
{
	struct hlist_head *head;
	struct lttng_pid_hash_node *e;
	uint32_t hash = hash_32(pid, 32);

	head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry_rcu(e, head, hlist) {
		if (pid == e->pid)
			return 1;	/* Found */
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_pid_tracker_lookup);
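
/*
 * Illustrative fast path, not part of this file: the lookup is meant
 * to be called from a tracepoint probe, where preemption is disabled
 * and therefore an RCU-sched read-side critical section is already in
 * effect. The session/pid_tracker pointer chasing below is an
 * assumption made for the example; only the lookup call itself is
 * defined in this file.
 *
 *	struct lttng_pid_tracker *lpf;
 *
 *	lpf = lttng_rcu_dereference(session->pid_tracker);
 *	if (lpf && !lttng_pid_tracker_lookup(lpf, current->tgid))
 *		return;		(PID not tracked: skip the event)
 */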

/*
 * Tracker add and del operations support concurrent RCU lookups.
 */
int lttng_pid_tracker_add(struct lttng_pid_tracker *lpf, int pid)
{
	struct hlist_head *head;
	struct lttng_pid_hash_node *e;
	uint32_t hash = hash_32(pid, 32);

	head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry(e, head, hlist) {
		if (pid == e->pid)
			return -EEXIST;
	}
	e = kmalloc(sizeof(struct lttng_pid_hash_node), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->pid = pid;
	/* Publish the fully initialized node to concurrent RCU readers. */
	hlist_add_head_rcu(&e->hlist, head);
	return 0;
}

static
void pid_tracker_del_node_rcu(struct lttng_pid_hash_node *e)
{
	hlist_del_rcu(&e->hlist);
	/*
	 * We choose to use a heavyweight synchronize on removal here,
	 * since removal of a PID from the tracker mask is a rare
	 * operation, and we don't want to use more cache lines than
	 * we really need when doing the PID lookups, so we don't want
	 * to afford adding an rcu_head field to those pid hash nodes.
	 */
	synchronize_trace();
	kfree(e);
}
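
/*
 * For contrast, a hypothetical deferred-free variant would avoid
 * blocking for a grace period by using call_rcu()-style reclamation,
 * at the cost of embedding a struct rcu_head in every
 * lttng_pid_hash_node, which is precisely the extra per-node footprint
 * the comment above wants to avoid. Sketch only, assuming the node
 * gained a field named "rcu" and that the callback flavor matches the
 * read side:
 *
 *	static void pid_tracker_free_node_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct lttng_pid_hash_node, rcu));
 *	}
 *
 *	hlist_del_rcu(&e->hlist);
 *	call_rcu(&e->rcu, pid_tracker_free_node_rcu);
 */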

/*
 * This removal is only used on destroy, so it does not need to support
 * concurrent RCU lookups.
 */
static
void pid_tracker_del_node(struct lttng_pid_hash_node *e)
{
	hlist_del(&e->hlist);
	kfree(e);
}

int lttng_pid_tracker_del(struct lttng_pid_tracker *lpf, int pid)
{
	struct hlist_head *head;
	struct lttng_pid_hash_node *e;
	uint32_t hash = hash_32(pid, 32);

	head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
	/*
	 * No need for _safe iteration, because we stop traversal as
	 * soon as we remove the entry.
	 */
	lttng_hlist_for_each_entry(e, head, hlist) {
		if (pid == e->pid) {
			pid_tracker_del_node_rcu(e);
			return 0;
		}
	}
	return -ENOENT;	/* Not found */
}

struct lttng_pid_tracker *lttng_pid_tracker_create(void)
{
	/* kzalloc zero-initializes every hash bucket head, so the tracker starts empty. */
	return kzalloc(sizeof(struct lttng_pid_tracker), GFP_KERNEL);
}

void lttng_pid_tracker_destroy(struct lttng_pid_tracker *lpf)
{
	int i;

	for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
		struct hlist_head *head = &lpf->pid_hash[i];
		struct lttng_pid_hash_node *e;
		struct hlist_node *tmp;

		lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
			pid_tracker_del_node(e);
	}
	kfree(lpf);
}