diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index f760836c..a24013fb 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -29,6 +29,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 
+#include <wrapper/mm.h>
 #include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
 #include <wrapper/ringbuffer/config.h>
 #include <wrapper/ringbuffer/backend.h>
@@ -56,6 +57,25 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	unsigned long i;
 
 	num_pages = size >> PAGE_SHIFT;
+
+	/*
+	 * Verify that there are enough free pages available on the system
+	 * for the current allocation request.
+	 * wrapper_check_enough_free_pages uses si_mem_available() if
+	 * available, and returns whether there should be enough free pages
+	 * based on the current estimate.
+	 */
+	if (!wrapper_check_enough_free_pages(num_pages))
+		goto not_enough_pages;
+
+	/*
+	 * Set the current user thread as the first target of the OOM killer.
+	 * If the estimate received from si_mem_available() was off, and we do
+	 * end up running out of memory because of this buffer allocation, we
+	 * want to kill the offending app first.
+	 */
+	wrapper_set_current_oom_origin();
+
 	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
 	subbuf_size = chanb->subbuf_size;
 	num_subbuf_alloc = num_subbuf;
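
For context, the wrapper_check_enough_free_pages() and wrapper_set_current_oom_origin()/wrapper_clear_current_oom_origin() calls introduced above come from the newly included wrapper/mm.h. A minimal sketch of what such helpers could look like, assuming si_mem_available() (mainline >= 4.6) and set_current_oom_origin()/clear_current_oom_origin() (mainline >= 3.8); this is a sketch, not the verbatim lttng-modules code:

/* Sketch of wrapper/mm.h-style helpers; not verbatim lttng-modules code. */
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/version.h>

static inline
bool wrapper_check_enough_free_pages(unsigned long num_pages)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
	/*
	 * si_mem_available() estimates how many pages can be allocated
	 * without swapping or invoking the OOM killer.
	 */
	return num_pages < si_mem_available();
#else
	/* No reliable estimate on older kernels: allow the allocation. */
	return true;
#endif
}

static inline
void wrapper_set_current_oom_origin(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	/* Make the current task the OOM killer's preferred victim. */
	set_current_oom_origin();
#endif
}

static inline
void wrapper_clear_current_oom_origin(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
	clear_current_oom_origin();
#endif
}
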
@@ -71,7 +91,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	if (unlikely(!pages))
 		goto pages_error;
 
-	bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
+	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
 				     * num_subbuf_alloc,
 				  1 << INTERNODE_CACHE_SHIFT),
 			GFP_KERNEL | __GFP_NOWARN,
@@ -90,7 +110,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	/* Allocate backend pages array elements */
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		bufb->array[i] =
-			kzalloc_node(ALIGN(
+			lttng_kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_pages) +
 				sizeof(struct lib_ring_buffer_backend_page)
 				* num_pages_per_subbuf,
@@ -102,7 +122,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	}
 
 	/* Allocate write-side subbuffer table */
-	bufb->buf_wsb = kzalloc_node(ALIGN(
+	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_subbuffer)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
@@ -122,7 +142,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
 
 	/* Allocate subbuffer packet counter table */
-	bufb->buf_cnt = kzalloc_node(ALIGN(
+	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_counts)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
@@ -150,22 +170,25 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	 * will not fault.
 	 */
	wrapper_vmalloc_sync_all();
+	wrapper_clear_current_oom_origin();
 	vfree(pages);
 	return 0;
 
 free_wsb:
-	kfree(bufb->buf_wsb);
+	lttng_kvfree(bufb->buf_wsb);
 free_array:
 	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
-		kfree(bufb->array[i]);
+		lttng_kvfree(bufb->array[i]);
 depopulate:
 	/* Free all allocated pages */
 	for (i = 0; (i < num_pages && pages[i]); i++)
 		__free_page(pages[i]);
-	kfree(bufb->array);
+	lttng_kvfree(bufb->array);
 array_error:
 	vfree(pages);
 pages_error:
+	wrapper_clear_current_oom_origin();
+not_enough_pages:
 	return -ENOMEM;
 }
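
The kmalloc_node()/kzalloc_node() to lttng_kvmalloc_node()/lttng_kvzalloc_node() conversion above (paired with lttng_kvfree() on the error paths) lets the potentially large backend tables fall back to vmalloc when physically contiguous memory is scarce, instead of failing outright. A minimal sketch of such a fallback for kernels predating kvmalloc_node() (mainline 4.12); the actual wrapper/vmalloc.h implementation may differ:

/* Sketch only: kmalloc first, vmalloc as fallback; not verbatim lttng code. */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/* Fail fast and quietly so we can fall back to vmalloc. */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret)
		ret = __vmalloc(size, flags, PAGE_KERNEL); /* node hint lost */
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void lttng_kvfree(const void *addr)
{
	/* Free with the allocator that actually provided the memory. */
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
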
@@ -191,14 +214,14 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
 	if (chanb->extra_reader_sb)
 		num_subbuf_alloc++;
 
-	kfree(bufb->buf_wsb);
-	kfree(bufb->buf_cnt);
+	lttng_kvfree(bufb->buf_wsb);
+	lttng_kvfree(bufb->buf_cnt);
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
 			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
-		kfree(bufb->array[i]);
+		lttng_kvfree(bufb->array[i]);
 	}
-	kfree(bufb->array);
+	lttng_kvfree(bufb->array);
 	bufb->allocated = 0;
 }
 
@@ -249,7 +272,7 @@ void channel_backend_reset(struct channel_backend *chanb)
 	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 
 /*
  * No need to implement a "dead" callback to do a buffer switch here,
@@ -281,7 +304,7 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -331,7 +354,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 /**
  * channel_backend_init - initialize a channel backend
@@ -409,13 +432,13 @@ int channel_backend_init(struct channel_backend *chanb,
 		if (!chanb->buf)
 			goto free_cpumask;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
 		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
 			&chanb->cpuhp_prepare.node);
 		if (ret)
 			goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		{
 			/*
@@ -452,7 +475,7 @@ int channel_backend_init(struct channel_backend *chanb,
 			}
 #endif
 		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	} else {
 		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
 		if (!chanb->buf)
@@ -467,16 +490,18 @@ int channel_backend_init(struct channel_backend *chanb,
 
 free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
-				&chanb->cpuhp_prepare.node);
-		WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
+		/*
+		 * Teardown of the lttng_rb_hp_prepare instance on "add"
+		 * error is handled within the cpu hotplug core; there is
+		 * no teardown to do from the caller.
+		 */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 #ifdef CONFIG_HOTPLUG_CPU
 		put_online_cpus();
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
@@ -505,15 +530,15 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
 	const struct lib_ring_buffer_config *config = &chanb->config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		int ret;
 
 		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
 				&chanb->cpuhp_prepare.node);
 		WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	}
 }
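
Throughout the rest of the file, LINUX_VERSION_CODE/KERNEL_VERSION checks are rewritten as LTTNG_LINUX_VERSION_CODE/LTTNG_KERNEL_VERSION, funneling every kernel version comparison through lttng-modules' own macros so that distribution-specific version quirks can be handled in a single header. A plausible minimal form, assuming the macros simply mirror the mainline encoding (the real lttng-kernel-version.h is more elaborate):

/* Sketch: version wrappers mirroring KERNEL_VERSION(); not verbatim. */
#include <linux/version.h>

#define LTTNG_KERNEL_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))
#define LTTNG_LINUX_VERSION_CODE	LINUX_VERSION_CODE

/* Usage mirrors the checks in the diff above: */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
/* cpuhp_state_add_instance() and friends are available. */
#endif
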