#define dbg_printf(fmt, args...)
#endif
+/* For testing */
+#define POISON_FREE
+
/*
 * Per-CPU split-counters lazily update the global counter once every
 * 1024 additions/removals. They automatically keep track of when a
 * resize is required.
return order;
}
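
The split-counter scheme described in the comment above can be modeled in
isolation. The sketch below is illustrative only, with hypothetical names
(split_counter, COMMIT_MASK); it is not the hash table's actual code, but
it shows the idea: each CPU accumulates a private count and folds a batch
into the shared global counter once every 1024 operations, so contention
on the global counter stays low at the cost of it being only
approximately current.

#include <stdatomic.h>

#define COMMIT_MASK	1023UL	/* fold into global every 1024 ops */

struct split_counter {
	unsigned long local;	/* per-CPU, updated without synchronization */
	atomic_ulong *global;	/* shared, approximate total */
};

static void split_counter_add(struct split_counter *c)
{
	/* Publish a batch of 1024 additions once the local count wraps. */
	if (!(++c->local & COMMIT_MASK))
		atomic_fetch_add(c->global, COMMIT_MASK + 1);
}
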
+#ifdef POISON_FREE
+#define poison_free(ptr) \
+ do { \
+ memset(ptr, 0x42, sizeof(*(ptr))); \
+ free(ptr); \
+ } while (0)
+#else
+#define poison_free(ptr) free(ptr)
+#endif
+
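
As a standalone illustration (not part of the patch) of what the 0x42
poisoning buys: after poison_free(), a read through a dangling pointer
returns the recognizable poison pattern instead of stale-but-plausible
data, so use-after-free bugs surface deterministically in a debugger.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON_FREE
#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif

struct node { int key; };

int main(void)
{
	struct node *n = malloc(sizeof(*n));

	n->key = 7;
	poison_free(n);
	/*
	 * Deliberate use-after-free, for demonstration only (this read is
	 * undefined behavior): the key now reads back as 0x42424242 until
	 * the allocator reuses the block, instead of silently returning
	 * the stale value 7.
	 */
	printf("key after free: 0x%x\n", n->key);
	return 0;
}

Note that memset(ptr, 0x42, sizeof(*(ptr))) poisons only the declared
size of the pointed-to type, so memory reached through a flexible array
member or a separately allocated tail is not covered.
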
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
- free(count);
+ poison_free(count);
}
static
void cds_lfht_free_table_cb(struct rcu_head *head)
{
struct rcu_table *t =
caa_container_of(head, struct rcu_table, head);
- free(t);
+ poison_free(t);
}
static
void cds_lfht_free_level(struct rcu_head *head)
{
struct rcu_level *l =
caa_container_of(head, struct rcu_level, head);
- free(l);
+ poison_free(l);
}
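
Both callbacks above use the standard RCU deferred-free pattern: a
struct rcu_head is embedded in the object, the callback is queued to run
after a grace period, and caa_container_of() recovers the enclosing
structure from the embedded head. A minimal self-contained sketch of
that pattern follows; the type and function names here are hypothetical
stand-ins for liburcu's real ones.

#include <stddef.h>
#include <stdlib.h>

/* Same idea as liburcu's caa_container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head {
	void (*func)(struct rcu_head *head);
};

struct level_example {
	struct rcu_head head;	/* embedded callback anchor */
	unsigned long len;
};

/* Invoked after a grace period; recovers the enclosing object. */
static void free_level_cb(struct rcu_head *head)
{
	struct level_example *l =
		container_of(head, struct level_example, head);

	free(l);	/* or poison_free(l), as in the patch */
}
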
/*
bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
assert(is_dummy(t->tbl[order]->nodes[i].next));
}
- free(t->tbl[order]);
+ poison_free(t->tbl[order]);
}
return 0;
}
ret = cds_lfht_delete_dummy(ht);
if (ret)
return ret;
- free(ht->t);
+ poison_free(ht->t);
free_per_cpu_items_count(ht->percpu_count);
- free(ht);
+ poison_free(ht);
return ret;
}
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
- free(work);
+ poison_free(work);
cmm_smp_mb(); /* finish resize before decrement */
uatomic_dec(&ht->in_progress_resize);
}
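
The barrier placement here is load-bearing: the resize's effects (and
the poisoned free of work) must be globally visible before
in_progress_resize is decremented, or a thread waiting for the counter
to reach zero before destroying the table could observe zero while a
resize is still writing. A hedged sketch of that handshake with C11
atomics, where release/acquire ordering stands in for the patch's
cmm_smp_mb()/uatomic_dec() pairing:

#include <stdatomic.h>

static atomic_int in_progress_resize;

static void resize_worker(void)
{
	/* ... perform the resize under the resize mutex ... */

	/*
	 * Release ordering plays the role of cmm_smp_mb(): everything the
	 * resize wrote happens-before the decrement is observed.
	 */
	atomic_fetch_sub_explicit(&in_progress_resize, 1,
				  memory_order_release);
}

static void wait_for_resizes(void)
{
	/* Destroyer side: acquire pairs with the release above. */
	while (atomic_load_explicit(&in_progress_resize,
				    memory_order_acquire))
		;	/* busy-wait; real code would yield or poll */
}
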
/* hardcoded number of CPUs */
#define NR_CPUS 16384
+/* For testing */
+#define POISON_FREE
+
+#ifdef POISON_FREE
+#define poison_free(ptr) \
+ do { \
+ memset(ptr, 0x42, sizeof(*(ptr))); \
+ free(ptr); \
+ } while (0)
+#else
+#define poison_free(ptr) free(ptr)
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)