#include <linux/delay.h>
#include <linux/module.h>
#include <linux/percpu.h>
+#include <asm/cacheflush.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static void lib_ring_buffer_flush_read_subbuf_dcache(
+ const struct lib_ring_buffer_config *config,
+ struct channel *chan,
+ struct lib_ring_buffer *buf)
+{
+ struct lib_ring_buffer_backend_pages *pages;
+ unsigned long sb_bindex, id, i, nr_pages;
+
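+	/*
+	 * Only the mmap output exposes ring buffer pages through a
+	 * user-space mapping; no dcache flush is needed for the other
+	 * output types.
+	 */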
+ if (config->output != RING_BUFFER_MMAP)
+ return;
+
+	/*
+	 * Architectures with caches aliased on virtual addresses may
+	 * use different cache lines for the kernel linear mapping and
+	 * for the user-space memory mapping of the same pages. Since
+	 * the ring buffer is backed by the kernel linear mapping,
+	 * aligning that mapping with the user-space mapping is not
+	 * straightforward and would require extra TLB entries.
+	 * Therefore, simply flush the dcache for the entire sub-buffer
+	 * before reading it.
+	 */
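+	/* Locate the backend pages of the sub-buffer currently owned by the reader. */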
+ id = buf->backend.buf_rsb.id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ pages = buf->backend.array[sb_bindex];
+ nr_pages = buf->backend.num_pages_per_subbuf;
+ for (i = 0; i < nr_pages; i++) {
+ struct lib_ring_buffer_backend_page *backend_page;
+
+ backend_page = &pages->p[i];
+ flush_dcache_page(pfn_to_page(backend_page->pfn));
+ }
+}
+#else
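+/*
+ * flush_dcache_page() is a no-op on this architecture, so the
+ * read-side dcache flush can be an empty stub.
+ */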
+static void lib_ring_buffer_flush_read_subbuf_dcache(
+ const struct lib_ring_buffer_config *config,
+ struct channel *chan,
+ struct lib_ring_buffer *buf)
+{
+}
+#endif
+
/**
* lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
* @buf: ring buffer
buf->get_subbuf_consumed = consumed;
buf->get_subbuf = 1;
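+	/*
+	 * The sub-buffer is now held exclusively by the reader: flush
+	 * its dcache before it is accessed through the user-space
+	 * mapping.
+	 */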
+ lib_ring_buffer_flush_read_subbuf_dcache(config, chan, buf);
+
return 0;
nodata: