/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_

#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)

#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)

/*
 * exynos drm gem buffer structure.
 *
 * @kvaddr: kernel virtual address to allocated memory region.
 * @userptr: user space address.
 * @dma_addr: bus address (accessed by dma) to allocated memory region.
 *	- without an IOMMU this is a physical address, with an IOMMU
 *	it is a device address.
 * @dma_attrs: dma attributes used when the memory region was allocated.
 * @write: whether pages will be written to by the caller.
 * @pages: array of backing pages.
 * @sgt: sg table to transfer page data.
 * @size: size of allocated memory region.
 * @pfnmap: indicate whether the memory region from userptr is mmapped
 *	with VM_PFNMAP or not.
 */
struct exynos_drm_gem_buf {
	void __iomem		*kvaddr;
	unsigned long		userptr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	unsigned int		write;
	struct page		**pages;
	struct sg_table		*sgt;
	unsigned long		size;
	bool			pfnmap;
};

/*
 * exynos drm gem object structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object would be created
 *	by drm_gem_handle_create().
 * @buffer: a pointer to an exynos_drm_gem_buf object.
 *	- contains the information about the memory region allocated
 *	by user request or at framebuffer creation.
 * @size: size requested from user, in bytes; it is aligned to page size.
 * @vma: a pointer to vm_area.
 * @flags: indicate the memory type of the allocated buffer and its cache
 *	attribute.
 *
 * P.S. this object would be transferred to user as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object		base;
	struct exynos_drm_gem_buf	*buffer;
	unsigned long			size;
	struct vm_area_struct		*vma;
	unsigned int			flags;
};

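/*
 * Example (illustrative sketch, not part of this header): a driver callback
 * that receives the embedded struct drm_gem_object can recover the exynos
 * object with to_exynos_gem_obj() and inspect its buffer. The function name
 * below is hypothetical.
 *
 *	static void example_show_gem(struct drm_gem_object *obj)
 *	{
 *		struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
 *		struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
 *
 *		if (IS_NONCONTIG_BUFFER(exynos_gem_obj->flags))
 *			pr_info("non-contiguous buffer, size %lu\n", buf->size);
 *		else
 *			pr_info("contiguous buffer, size %lu\n", buf->size);
 *	}
 */
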
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);

/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);

/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size);

/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size);

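/*
 * Example (hedged sketch): in-kernel allocation and release of a contiguous,
 * cachable buffer. EXYNOS_BO_CONTIG and EXYNOS_BO_CACHABLE come from the
 * exynos_drm uapi header; the ERR_PTR convention on failure is assumed and
 * error handling is abbreviated.
 *
 *	struct exynos_drm_gem_obj *exynos_gem_obj;
 *
 *	exynos_gem_obj = exynos_drm_gem_create(dev,
 *				EXYNOS_BO_CONTIG | EXYNOS_BO_CACHABLE, SZ_1M);
 *	if (IS_ERR(exynos_gem_obj))
 *		return PTR_ERR(exynos_gem_obj);
 *
 *	... use exynos_gem_obj->buffer->dma_addr for scanout or dma ...
 *
 *	exynos_drm_gem_destroy(exynos_gem_obj);
 */
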
/*
 * request gem object creation and buffer allocation; the size is
 * calculated from framebuffer information such as width, height and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

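/*
 * Example (userspace sketch): this ioctl is reached through
 * DRM_IOCTL_EXYNOS_GEM_CREATE with struct drm_exynos_gem_create, both
 * declared in the exynos_drm uapi header; field names follow that header
 * and error handling is omitted.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4 * 1024 * 1024,
 *		.flags = EXYNOS_BO_NONCONTIG,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		printf("created gem handle %u\n", req.handle);
 */
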
/*
 * get dma address from a gem handle; this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling it increases the gem object reference count.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

/*
 * put dma address obtained from a gem handle; this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling it decreases the gem object reference count.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

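/*
 * Example (hedged sketch): how a 2d/3d acceleration driver might resolve a
 * gem handle handed in from user space. Each successful get is paired with
 * a put so the reference taken on the gem object is dropped again; the
 * ERR_PTR convention on failure is assumed and setup_hw() is hypothetical.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, file_priv);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	setup_hw(*addr);
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, file_priv);
 */
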
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/*
 * mmap the physically contiguous memory that a gem object contains
 * to user space.
 */
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);

/* map a user space region allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer information about a memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer size from a gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args);

/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset);

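/*
 * Example (userspace sketch): the dumb-buffer path is driven through the
 * generic DRM ioctls, which end up in the two functions above for exynos
 * devices; error handling is omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */
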
/* page fault handler; maps the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

/* set vm_flags; the vm attributes of the mapping can be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);

/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);

/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma);

/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma);

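/*
 * Example (hedged sketch): acquiring and releasing the pages behind a
 * userptr region. A private copy of the vma is assumed to be held while
 * the pages are in use, matching the helpers above; error handling is
 * abbreviated.
 *
 *	struct vm_area_struct *vma_copy;
 *	int ret;
 *
 *	vma_copy = exynos_gem_get_vma(vma);
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages,
 *						vma_copy);
 *	if (!ret) {
 *		... access the pages ...
 *		exynos_gem_put_pages_to_userptr(pages, npages, vma_copy);
 *	}
 *	exynos_gem_put_vma(vma_copy);
 */
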
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

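/*
 * Example (hedged sketch): bracketing a device transfer with the two
 * helpers above. @sgt would typically describe the backing pages of a
 * non-contiguous buffer and @dir the direction of the hardware access;
 * run_hw_job() is hypothetical.
 *
 *	int ret;
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, buf->sgt, DMA_TO_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	run_hw_job();
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, buf->sgt, DMA_TO_DEVICE);
 */
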
#endif