/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __DRM_GEM_SHMEM_HELPER_H__
#define __DRM_GEM_SHMEM_HELPER_H__

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mutex.h>

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

struct dma_buf_attachment;
struct drm_mode_create_dumb;
struct drm_printer;
struct iosys_map;
struct sg_table;

/**
 * struct drm_gem_shmem_object - GEM object backed by shmem
 */
struct drm_gem_shmem_object {
	/**
	 * @base: Base GEM object
	 */
	struct drm_gem_object base;

	/**
	 * @pages_lock: Protects the page table and use count
	 */
	struct mutex pages_lock;

	/**
	 * @pages: Page table
	 */
	struct page **pages;

	/**
	 * @pages_use_count:
	 *
	 * Reference count on the pages table.
	 * The pages are put when the count reaches zero.
	 */
	unsigned int pages_use_count;

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object has been purged.
	 * Positive values are driver specific and not used by the helpers.
	 */
	int madv;

	/**
	 * @madv_list: List entry for madvise tracking
	 *
	 * Typically used by drivers to track purgeable objects
	 */
	struct list_head madv_list;

	/**
	 * @sgt: Scatter/gather table for imported PRIME buffers
	 */
	struct sg_table *sgt;

	/**
	 * @vmap_lock: Protects the vmap address and use count
	 */
	struct mutex vmap_lock;

	/**
	 * @vaddr: Kernel virtual address of the backing memory
	 */
	void *vaddr;

	/**
	 * @vmap_use_count:
	 *
	 * Reference count on the virtual address.
	 * The address is unmapped when the count reaches zero.
	 */
	unsigned int vmap_use_count;

	/**
	 * @pages_mark_dirty_on_put:
	 *
	 * Mark pages as dirty when they are put.
	 */
	bool pages_mark_dirty_on_put : 1;

	/**
	 * @pages_mark_accessed_on_put:
	 *
	 * Mark pages as accessed when they are put.
	 */
	bool pages_mark_accessed_on_put : 1;

	/**
	 * @map_wc: map object write-combined (instead of using shmem defaults).
	 */
	bool map_wc : 1;
};

#define to_drm_gem_shmem_obj(obj) \
	container_of(obj, struct drm_gem_shmem_object, base)

struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);

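/*
 * Example (illustrative sketch, not part of this header): allocating a
 * shmem-backed GEM object from a hypothetical driver function that already
 * has a &drm_device and a size in bytes. drm_gem_shmem_create() returns an
 * ERR_PTR() value on failure; the object is later released through
 * drm_gem_shmem_free(), normally via the &drm_gem_object_funcs.free handler
 * defined further down in this header.
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 */
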
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map);
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map);
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma);

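/*
 * Example (illustrative sketch): pairing drm_gem_shmem_vmap() and
 * drm_gem_shmem_vunmap() around a CPU access. The mapping is described by a
 * &struct iosys_map; iosys_map_memset() from <linux/iosys-map.h> is used here
 * so the access stays valid whether the backing store ends up in system or
 * I/O memory. Drivers usually reach these helpers through the
 * &drm_gem_object_funcs.vmap and &drm_gem_object_funcs.vunmap wrappers below.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = drm_gem_shmem_vmap(shmem, &map);
 *	if (ret)
 *		return ret;
 *
 *	iosys_map_memset(&map, 0, 0, shmem->base.size);
 *
 *	drm_gem_shmem_vunmap(shmem, &map);
 */
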
int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);

static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
{
	return (shmem->madv > 0) &&
		!shmem->vmap_use_count && shmem->sgt &&
		!shmem->base.dma_buf && !shmem->base.import_attach;
}

void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);

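/*
 * Example (illustrative sketch): a driver shrinker walking its own list of
 * objects that userspace has marked "don't need" via drm_gem_shmem_madvise().
 * The list head "shrinker_list", the "freed" counter and the surrounding
 * locking are driver specific and only hinted at here; drm_gem_shmem_purge()
 * returns false when the object could not be purged (for example because its
 * lock is contended), in which case the object is simply skipped.
 *
 *	list_for_each_entry(shmem, &mydrv->shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(shmem))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */
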
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);

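/*
 * Example (illustrative sketch): a driver that needs the object's backing
 * pages for device access would typically call drm_gem_shmem_get_pages_sgt(),
 * which pins the pages and returns a scatter/gather table that is already
 * mapped for DMA (ERR_PTR() on failure). The helper "mydrv_map_chunk" below
 * is a hypothetical stand-in for programming the device's page tables.
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	sgt = drm_gem_shmem_get_pages_sgt(shmem);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		mydrv_map_chunk(sg_dma_address(sg), sg_dma_len(sg));
 */
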
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
			      struct drm_printer *p, unsigned int indent);

extern const struct vm_operations_struct drm_gem_shmem_vm_ops;

/*
 * GEM object functions
 */

/**
 * drm_gem_shmem_object_free - GEM object function for drm_gem_shmem_free()
 * @obj: GEM object to free
 *
 * This function wraps drm_gem_shmem_free(). Drivers that employ the shmem helpers
 * should use it as their &drm_gem_object_funcs.free handler.
 */
static inline void drm_gem_shmem_object_free(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_free(shmem);
}

/**
 * drm_gem_shmem_object_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_print_info(). Drivers that employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.print_info handler.
 */
static inline void drm_gem_shmem_object_print_info(struct drm_printer *p, unsigned int indent,
						   const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_print_info(shmem, p, indent);
}

/**
 * drm_gem_shmem_object_pin - GEM object function for drm_gem_shmem_pin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_pin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.pin handler.
 */
static inline int drm_gem_shmem_object_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_pin(shmem);
}

/**
 * drm_gem_shmem_object_unpin - GEM object function for drm_gem_shmem_unpin()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_unpin(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.unpin handler.
 */
static inline void drm_gem_shmem_object_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_unpin(shmem);
}

/**
 * drm_gem_shmem_object_get_sg_table - GEM object function for drm_gem_shmem_get_sg_table()
 * @obj: GEM object
 *
 * This function wraps drm_gem_shmem_get_sg_table(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.get_sg_table handler.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
 */
static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_get_sg_table(shmem);
}

/**
 * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap()
 * @obj: GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store.
 *
 * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.vmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj,
					    struct iosys_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_vmap(shmem, map);
}

/**
 * drm_gem_shmem_object_vunmap - GEM object function for drm_gem_shmem_vunmap()
 * @obj: GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.vunmap handler.
 */
static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj,
					       struct iosys_map *map)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_vunmap(shmem, map);
}

/**
 * drm_gem_shmem_object_mmap - GEM object function for drm_gem_shmem_mmap()
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function wraps drm_gem_shmem_mmap(). Drivers that employ the shmem helpers should
 * use it as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static inline int drm_gem_shmem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	return drm_gem_shmem_mmap(shmem, vma);
}

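/*
 * Example (illustrative sketch): the wrappers above are meant to be plugged
 * directly into a driver's &drm_gem_object_funcs, together with
 * drm_gem_shmem_vm_ops for the fault handling. Only the structure name
 * "mydrv_gem_object_funcs" is hypothetical.
 *
 *	static const struct drm_gem_object_funcs mydrv_gem_object_funcs = {
 *		.free		= drm_gem_shmem_object_free,
 *		.print_info	= drm_gem_shmem_object_print_info,
 *		.pin		= drm_gem_shmem_object_pin,
 *		.unpin		= drm_gem_shmem_object_unpin,
 *		.get_sg_table	= drm_gem_shmem_object_get_sg_table,
 *		.vmap		= drm_gem_shmem_object_vmap,
 *		.vunmap		= drm_gem_shmem_object_vunmap,
 *		.mmap		= drm_gem_shmem_object_mmap,
 *		.vm_ops		= &drm_gem_shmem_vm_ops,
 *	};
 */
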
/*
 * Driver ops
 */

struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args);

/**
 * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
 *
 * This macro provides a shortcut for setting the shmem GEM operations in
 * the &drm_driver structure.
 */
#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		= drm_gem_prime_mmap, \
	.dumb_create		= drm_gem_shmem_dumb_create

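/*
 * Example (illustrative sketch): DRM_GEM_SHMEM_DRIVER_OPS expands to named
 * initializers and is therefore placed directly inside a &drm_driver
 * definition, followed by a comma. The driver name, fops symbol and version
 * numbers below are hypothetical placeholders.
 *
 *	static const struct drm_driver mydrv_driver = {
 *		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.fops			= &mydrv_fops,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *		.name			= "mydrv",
 *		.desc			= "Example shmem-backed driver",
 *		.major			= 1,
 *		.minor			= 0,
 *	};
 */
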
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */