/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Re-using code from kernel/relay.c, hence the GPL-2.0 license for this
 * file.
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/ringbuffer/vfs.h>
21 * fault() vm_op implementation for ring buffer file mapping.
23 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
24 static vm_fault_t
lib_ring_buffer_fault_compat(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
26 static int lib_ring_buffer_fault_compat(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
29 struct lib_ring_buffer
*buf
= vma
->vm_private_data
;
30 struct channel
*chan
= buf
->backend
.chan
;
31 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
32 pgoff_t pgoff
= vmf
->pgoff
;
35 unsigned long offset
, sb_bindex
;
38 * Verify that faults are only done on the range of pages owned by the
41 offset
= pgoff
<< PAGE_SHIFT
;
42 sb_bindex
= subbuffer_id_get_index(config
, buf
->backend
.buf_rsb
.id
);
43 if (!(offset
>= buf
->backend
.array
[sb_bindex
]->mmap_offset
44 && offset
< buf
->backend
.array
[sb_bindex
]->mmap_offset
+
45 buf
->backend
.chan
->backend
.subbuf_size
))
46 return VM_FAULT_SIGBUS
;
48 * ring_buffer_read_get_pfn() gets the page frame number for the
49 * current reader's pages.
51 pfnp
= lib_ring_buffer_read_get_pfn(&buf
->backend
, offset
, &virt
);
53 return VM_FAULT_SIGBUS
;
54 get_page(pfn_to_page(*pfnp
));
55 vmf
->page
= pfn_to_page(*pfnp
);
60 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
61 static vm_fault_t
lib_ring_buffer_fault(struct vm_fault
*vmf
)
63 struct vm_area_struct
*vma
= vmf
->vma
;
64 return lib_ring_buffer_fault_compat(vma
, vmf
);
66 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
67 static int lib_ring_buffer_fault(struct vm_fault
*vmf
)
69 struct vm_area_struct
*vma
= vmf
->vma
;
70 return lib_ring_buffer_fault_compat(vma
, vmf
);
72 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
73 static int lib_ring_buffer_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
75 return lib_ring_buffer_fault_compat(vma
, vmf
);
77 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
80 * vm_ops for ring buffer file mappings.
82 static const struct vm_operations_struct lib_ring_buffer_mmap_ops
= {
83 .fault
= lib_ring_buffer_fault
,
87 * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
88 * @buf: ring buffer to map
89 * @vma: vm_area_struct describing memory to be mapped
91 * Returns 0 if ok, negative on error
93 * Caller should already have grabbed mmap_sem.
95 static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer
*buf
,
96 struct vm_area_struct
*vma
)
98 unsigned long length
= vma
->vm_end
- vma
->vm_start
;
99 struct channel
*chan
= buf
->backend
.chan
;
100 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
101 unsigned long mmap_buf_len
;
103 if (config
->output
!= RING_BUFFER_MMAP
)
106 mmap_buf_len
= chan
->backend
.buf_size
;
107 if (chan
->backend
.extra_reader_sb
)
108 mmap_buf_len
+= chan
->backend
.subbuf_size
;
110 if (length
!= mmap_buf_len
)
113 vma
->vm_ops
= &lib_ring_buffer_mmap_ops
;
114 vma
->vm_flags
|= VM_DONTEXPAND
;
115 vma
->vm_private_data
= buf
;
/*
 * lib_ring_buffer_mmap - map the given ring buffer into @vma.
 * Thin exported wrapper around lib_ring_buffer_mmap_buf(); @filp is
 * part of the signature for symmetry with file operations but unused.
 */
int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_mmap_buf(buf, vma);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);
128 * vfs_lib_ring_buffer_mmap - mmap file op
130 * @vma: the vma describing what to map
132 * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
134 int vfs_lib_ring_buffer_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
136 struct lib_ring_buffer
*buf
= filp
->private_data
;
137 return lib_ring_buffer_mmap(filp
, vma
, buf
);
139 EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap
);