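/*
 * lttv.git - contrib/fsm_checker/LOCK_CHECK/fsm_locking.c
 *
 * FSM-based lock-usage checker for LTTV: registers hooks on lockdep and
 * scheduler events of an LTTng kernel trace and drives one lockclass
 * state machine per CPU to detect improper use of kernel locks (see the
 * LTTV_MODULE description at the end of this file).
 */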
#include <glib.h>    /* GHashTable, GArray, GQuark */
#include <lttv/lttv.h>
#include <lttv/option.h>
#include <lttv/module.h>
#include <lttv/hook.h>
#include <lttv/attribute.h>
#include <lttv/iattribute.h>
#include <lttv/stats.h>
#include <lttv/filter.h>
#include <ltt/ltt.h>
#include <ltt/event.h>
#include <ltt/trace.h>
#include <stdio.h>
#include "fsm_locking_sm.h"
#include "lockclass.h"


int NUM_OF_CPUS;
//one hash table shared by all locks
GHashTable *locksTable;
//one lockclass state machine per CPU
GArray *fsm_array;
//total number of lock_acquire events seen
int total_acquire;
//global timestamps, useful for debugging
long ts_sec;
long ts_ns;

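/* Reset the recorded IRQ-state flags of a lock tracked in locksTable. */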
void lockclass_clearlock(struct lockclass *fsm, struct lockstruct *lock){
    struct lockstruct *temp;

    temp = g_hash_table_lookup(locksTable, (gconstpointer)lock->lock_add);
    if(temp != NULL){
        temp->taken_irqs_on = 0;
        temp->taken_irqs_off = 0;
        temp->hardirq_context = 0;
    }
}

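/* Hook for lockdep "init_map": a lock being (re)initialized is reported
 * to the per-CPU state machine as a freed lock. */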
static gboolean lockdep_init_map(void *hook_data, void *call_data){
    LttvTracefileState *s = (LttvTracefileState *) call_data;
    int cpu = s->cpu;
    LttEvent *e = ltt_tracefile_get_event(s->parent.tf);
    LttvTraceHook *th = (LttvTraceHook *)hook_data;

    struct marker_field *f = lttv_trace_get_hook_field(th, 0);
    guint32 lock_add = ltt_event_get_long_unsigned(e, f);

    //TODO: a lock address being (re)initialized by lockdep should no longer be present on the stack
    struct lockclass *fsm = g_array_index(fsm_array, struct lockclass *, cpu);

    struct lockstruct *lock = g_hash_table_lookup(locksTable, (gconstpointer)lock_add);
    if(lock == NULL)
        return FALSE;

    LttTime time = ltt_event_time(e);
    ts_sec = (long)time.tv_sec;
    ts_ns = (long)time.tv_nsec;

    lockclassContext_free_lock(&fsm->_fsm, lock);
    return FALSE;
}

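/* Hook for kernel "sched_schedule": notify the per-CPU state machine
 * that the previous process was scheduled out. */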
static gboolean kernel_sched_schedule(void *hook_data, void *call_data){
    LttvTracefileState *s = (LttvTracefileState *) call_data;
    //printf("event sched_schedule encountered on cpu: %d\n", s->cpu);
    //work should be done per processor
    int cpu = s->cpu;
    //parse event here
    LttEvent *e = ltt_tracefile_get_event(s->parent.tf);
    LttvTraceHook *th = (LttvTraceHook *)hook_data;

    struct marker_field *f = lttv_trace_get_hook_field(th, 0);
    guint32 prev_pid = ltt_event_get_long_unsigned(e, f);

    f = lttv_trace_get_hook_field(th, 1);
    guint32 next_pid = ltt_event_get_long_unsigned(e, f);

    LttTime time = ltt_event_time(e);
    ts_sec = (long)time.tv_sec;
    ts_ns = (long)time.tv_nsec;

    struct lockclass *fsm = g_array_index(fsm_array, struct lockclass *, cpu);

    lockclassContext_schedule_out(&fsm->_fsm, prev_pid);
    return FALSE;
}

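/* Hook for lockdep "lock_acquire": parse the event fields, register the
 * lock in locksTable on first sight, and feed an acquire_lock transition
 * to the per-CPU state machine (read acquisitions are filtered out). */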
static gboolean lockdep_lock_acquire(void *hook_data, void *call_data){
    total_acquire++;
    LttvTracefileState *s = (LttvTracefileState *) call_data;
    int cpu = s->cpu;

    //parse event here
    LttEvent *e = ltt_tracefile_get_event(s->parent.tf);
    LttvTraceHook *th = (LttvTraceHook *)hook_data;

    struct marker_field *f = lttv_trace_get_hook_field(th, 2);
    guint32 lock_add = ltt_event_get_long_unsigned(e, f);

    f = lttv_trace_get_hook_field(th, 5);
    int hardirqs_off = ltt_event_get_long_unsigned(e, f);

    f = lttv_trace_get_hook_field(th, 0);
    guint32 ret_add = ltt_event_get_long_unsigned(e, f);    //return address, currently unused

    f = lttv_trace_get_hook_field(th, 4);
    int read = ltt_event_get_long_unsigned(e, f);

    f = lttv_trace_get_hook_field(th, 6);
    int hardirq_context = ltt_event_get_long_unsigned(e, f);

    LttvTraceState *ts = (LttvTraceState*) s->parent.t_context;
    LttvProcessState *process = ts->running_process[cpu];
    int pid = process->pid;

    LttTime time = ltt_event_time(e);
    ts_sec = (long)time.tv_sec;
    ts_ns = (long)time.tv_nsec;


    //filter out rwlock_acquire_read and rwsem_acquire_read
    if(read == 2 || read == 1)
        return FALSE;
    //read is 0 for spin_acquire, rwlock_acquire, mutex_acquire, rwsem_acquire & lock_map_acquire
    struct lockclass *fsm = g_array_index(fsm_array, struct lockclass *, cpu);

    struct lockstruct *lock = g_hash_table_lookup(locksTable, (gconstpointer)lock_add);
    if(lock == NULL){
        lock = lockstruct_Init(lock_add);
        //add the new lock to the table
        g_hash_table_insert(locksTable, (gpointer)lock_add, lock);
    }
    //lock->pid = pid; //update pid
    lockclassContext_acquire_lock(&fsm->_fsm, lock, lock->lock_add, hardirqs_off, hardirq_context, pid);

    return FALSE;
}

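/* Hook for lockdep "lock_release": feed a release_lock transition to the
 * per-CPU state machine for a lock already present in locksTable. */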
static gboolean lockdep_lock_release(void *hook_data, void *call_data){
    LttvTracefileState *s = (LttvTracefileState *) call_data;
    int cpu = s->cpu;

    //parse event here
    LttEvent *e = ltt_tracefile_get_event(s->parent.tf);
    LttvTraceHook *th = (LttvTraceHook *)hook_data;
    struct marker_field *f = lttv_trace_get_hook_field(th, 1);
    guint32 lock_add = ltt_event_get_long_unsigned(e, f);

    LttvTraceState *ts = (LttvTraceState*) s->parent.t_context;
    LttvProcessState *process = ts->running_process[cpu];
    int pid = process->pid;

    LttTime time = ltt_event_time(e);
    ts_sec = (long)time.tv_sec;
    ts_ns = (long)time.tv_nsec;

    struct lockstruct *lock = g_hash_table_lookup(locksTable, (gconstpointer)lock_add);

    if(lock == NULL)
        return FALSE;

    struct lockclass *fsm = g_array_index(fsm_array, struct lockclass *, cpu);

    lockclassContext_release_lock(&fsm->_fsm, lock);
    return FALSE;
}

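/* Print the recorded state of a single lock. */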
void printlock(struct lockstruct *lock){
    printf("Lock 0x%x: held_irqs_on: %d held_irqs_off: %d irqcontext: %d pid: %u\n",
            lock->lock_add, lock->taken_irqs_on,
            lock->taken_irqs_off, lock->hardirq_context,
            lock->pid);
}

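/* Dump every lock currently on the local stack of a per-CPU state machine. */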
void lockclass_printstack(struct lockclass *fsm){
    GArray *localstack = fsm->local_stack;
    int len = localstack->len;
    int i;
    struct lockstruct *lock;
    for(i = 0; i < len; i++){
        lock = g_array_index(localstack, struct lockstruct *, i);
        printlock(lock);
    }
}

void lockclass_schedule_err(struct lockclass *fsm, guint32 pid){
    printf("process %u was scheduled out\n", pid);
}

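/* Before-trace hook: resolve the lockdep and scheduler events of the
 * trace, attach the event hooks above to every tracefile, and allocate
 * one lockclass state machine per CPU plus the global lock table. */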
static int add_events_by_id_hooks(void *hook_data, void *call_data){
    LttvTraceContext *tc = (LttvTraceContext *) call_data;
    LttTrace *t = tc->t;

    //FIND NUMBER OF CPUS
    NUM_OF_CPUS = ltt_trace_get_num_cpu(t);

    // EVENT ***LOCKDEP_LOCK_ACQUIRE***
    GQuark LTT_FACILITY_LOCKDEP = g_quark_from_string("lockdep");
    GQuark LTT_EVENT_LOCK_ACQUIRE = g_quark_from_string("lock_acquire");

    GQuark LTT_FIELD_RET_ADDRESS = g_quark_from_string("retaddr");
    GQuark LTT_FIELD_SUBCLASS = g_quark_from_string("subclass");
    GQuark LTT_FIELD_LOCK = g_quark_from_string("lock");
    GQuark LTT_FIELD_TRYLOCK = g_quark_from_string("trylock");
    GQuark LTT_FIELD_READ = g_quark_from_string("read");
    GQuark LTT_FIELD_HARD_IRQS_OFF = g_quark_from_string("hardirqs_off");
    GQuark LTT_FIELD_HARDIRQ_CONTEXT = g_quark_from_string("hardirq_context");
    //*******************************************************************

    // EVENT ***KERNEL_SCHED_SCHEDULE***
    GQuark LTT_FACILITY_KERNEL = g_quark_from_string("kernel");
    GQuark LTT_EVENT_SCHED_SCHEDULE = g_quark_from_string("sched_schedule");

    GQuark LTT_FIELD_PREV_PID = g_quark_from_string("prev_pid");
    GQuark LTT_FIELD_NEXT_PID = g_quark_from_string("next_pid");
    GQuark LTT_FIELD_PREV_STATE = g_quark_from_string("prev_state");
    //*******************************************************************
    // EVENT ***LOCKDEP_LOCK_RELEASE***
    GQuark LTT_EVENT_LOCK_RELEASE = g_quark_from_string("lock_release");

    GQuark LTT_FIELD_NESTED = g_quark_from_string("nested");

    //*******************************************************************
    // EVENT ***LOCKDEP_INIT_MAP***
    GQuark LTT_EVENT_INIT_MAP = g_quark_from_string("init_map");

    //*******************************************************************

    // EVENT ***...

    //...

    //*******************************************************************

    //# of hooks to register = # of desired events (4 below)
    GArray *hooks = g_array_sized_new(FALSE, FALSE, sizeof(LttvTraceHook), 4);

    lttv_trace_find_hook(t, LTT_FACILITY_LOCKDEP, LTT_EVENT_LOCK_ACQUIRE,
            FIELD_ARRAY(LTT_FIELD_RET_ADDRESS, LTT_FIELD_SUBCLASS, LTT_FIELD_LOCK, LTT_FIELD_TRYLOCK,
                LTT_FIELD_READ, LTT_FIELD_HARD_IRQS_OFF, LTT_FIELD_HARDIRQ_CONTEXT),
            lockdep_lock_acquire,
            NULL,
            &hooks);

    lttv_trace_find_hook(t, LTT_FACILITY_KERNEL, LTT_EVENT_SCHED_SCHEDULE,
            FIELD_ARRAY(LTT_FIELD_PREV_PID, LTT_FIELD_NEXT_PID, LTT_FIELD_PREV_STATE),
            kernel_sched_schedule,
            NULL,
            &hooks);

    lttv_trace_find_hook(t, LTT_FACILITY_LOCKDEP, LTT_EVENT_LOCK_RELEASE,
            FIELD_ARRAY(LTT_FIELD_RET_ADDRESS, LTT_FIELD_LOCK, LTT_FIELD_NESTED),
            lockdep_lock_release,
            NULL,
            &hooks);

    lttv_trace_find_hook(t, LTT_FACILITY_LOCKDEP, LTT_EVENT_INIT_MAP,
            FIELD_ARRAY(LTT_FIELD_LOCK),
            lockdep_init_map,
            NULL,
            &hooks);
    //find remaining hooks here to fill the hooks array
    //...

    //LttvTraceHook *th = &g_array_index(hooks, LttvTraceHook, 0);


    //Determine id of needed events (debugging helper, currently unused)
    GQuark evQuark_lock_acquire = g_quark_try_string("lockdep_lock_acquire");
    struct marker_info *info_lock_acquire = marker_get_info_from_name(t, evQuark_lock_acquire);
    int ev_id_lock_acquire = marker_get_id_from_info(t, info_lock_acquire);

    //printf("id of event of interest: %d\n", ev_id_lock_acquire);

    //when multiple tracefiles exist:
    int nb_tracefiles = tc->tracefiles->len;
    int i, j;
    LttvTracefileContext **tfc;
    LttvHooks *needed_hooks;
    LttvTraceHook *th;    //set per hook in the loop below
    for(i = 0; i < nb_tracefiles; i++){
        tfc = &g_array_index(tc->tracefiles, LttvTracefileContext*, i);
        //printf("adding hooks for tracefile #%d\n", i);
        //add all needed hooks
        for(j = 0; j < hooks->len; j++)
        {
            th = &g_array_index(hooks, LttvTraceHook, j);
            needed_hooks = lttv_hooks_by_id_find((*tfc)->event_by_id, th->id);
            lttv_hooks_add(needed_hooks, th->h, th, LTTV_PRIO_DEFAULT);
            //printf("hooked with event id: %d\n", th->id);
        }
    }

    fsm_array = g_array_sized_new(FALSE, FALSE, sizeof(struct lockclass *), NUM_OF_CPUS);

    struct lockclass *class;
    for(i = 0; i < NUM_OF_CPUS; i++){
        class = lockclass_Init(i);
        g_array_append_val(fsm_array, class);
    }
    locksTable = g_hash_table_new(g_direct_hash, NULL);
    total_acquire = 0;

    return 0;
}

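/* Record, for a lock already in locksTable, the owning pid and the IRQ
 * state under which it was taken (with timestamps for later reporting). */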
//function to be moved to lockclass when testing is done:
void lockclass_updatelock(struct lockclass *fsm, struct lockstruct *lock, guint32 lock_add, int pid, int hardirqs_off, int hardirq_context){
    struct lockstruct *temp = g_hash_table_lookup(locksTable, (gconstpointer)lock_add);

    if(temp == NULL){
        printf("Attempting to update an uninitialized lock\n");
    } else {
        temp->pid = pid;
        if(hardirq_context == 1){
            temp->hardirq_context = 1;
            temp->hardirq_context_ts_sec = ts_sec;
            temp->hardirq_context_ts_ns = ts_ns;
        }
        if(hardirqs_off == 1){
            temp->taken_irqs_off = 1;
            temp->taken_irqs_off_ts_sec = ts_sec;
            temp->taken_irqs_off_ts_ns = ts_ns;
        }
        else if(hardirqs_off == 0){
            temp->taken_irqs_on = 1;
            temp->taken_irqs_on_ts_sec = ts_sec;
            temp->taken_irqs_on_ts_ns = ts_ns;
        }
    }
}

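/* Module initialization: register add_events_by_id_hooks on the global
 * "hooks/trace/before" hook list. */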
static void init(){

    gboolean result;

    LttvAttributeValue value;

    LttvIAttribute *attributes = LTTV_IATTRIBUTE(lttv_global_attributes());

    static LttvHooks *before_trace;

    result = lttv_iattribute_find_by_path(attributes, "hooks/trace/before", LTTV_POINTER, &value);
    g_assert(result);
    before_trace = *(value.v_pointer);
    g_assert(before_trace);
    //register add_events_by_id_hooks to be called before starting to read the trace
    lttv_hooks_add(before_trace, add_events_by_id_hooks, NULL, LTTV_PRIO_DEFAULT);

    //******************************************************************

}

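/* Module teardown: report the total number of lock_acquire events seen. */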
static void destroy(){
    printf("total lock_acquire %d\n", total_acquire);
    printf("\nEnd of locks analysis.\n");
}

LTTV_MODULE("fsm_locking", "Detects improper use of kernel locks", \
        "4 scenarios of problematic use of spinlocks are searched for", \
        init, destroy, "stats", "batchAnalysis", "option")