- /* Start by firing off a work queue on each CPU. Their sole purpose in life
- * is to guarantee that each CPU has been in a state where it was in syscall
- * mode (i.e. not in a trap, an IRQ or a soft IRQ) */
- /* NOTE(review): the count (1 - num_online_cpus()) starts the semaphore
- * negative; presumably an unseen down(&work_sema4) later in this function
- * blocks until each per-CPU work item has done up() — confirm against
- * ltt_statedump_work_func. */
- sema_init(&work_sema4, 1 - num_online_cpus());
- /* NOTE(review): kmalloc return is not checked — on allocation failure the
- * INIT_WORK below dereferences NULL. The (struct work_struct *) cast of
- * kmalloc's void * return is also unnecessary in C. cpu_work is not freed
- * in this fragment; verify ownership/cleanup elsewhere in the function. */
- cpu_work = (struct work_struct *)kmalloc(sizeof(struct work_struct) *
- num_online_cpus(), GFP_KERNEL);
- /* Queue one work item per online CPU. Assumes cpu_index enters this loop
- * at 0 — TODO confirm its initialization above this fragment. */
- for_each_online_cpu(cpu)
- {
- /* Old-style (pre-2.6.20) 3-argument INIT_WORK: &work_sema4 is the data
- * pointer handed to ltt_statedump_work_func. */
- INIT_WORK(&cpu_work[cpu_index], ltt_statedump_work_func, &work_sema4);
-
- /* TODO: verify RC */
- /* NOTE(review): delay 0 queues the item on CPU `cpu` immediately; the
- * return code is ignored (see TODO above), so a failed queue would leave
- * the semaphore count short and could hang a later down(). */
- schedule_delayed_work_on(cpu,&cpu_work[cpu_index],0);
- cpu_index++;
- }
-
- /* Presumably walks all tasks and dumps their states into the trace while
- * the per-CPU work items run — confirm against its definition. */
- ltt_enumerate_process_states();