xref: /linux/drivers/gpu/drm/arm/display/komeda/komeda_kms.c (revision 164666fa66669d437bdcc8d5f1744a2aee73be41)
// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

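/*
 * Create a dumb buffer backed by a CMA GEM object, with the pitch aligned
 * to the bus width of the underlying komeda device.
 */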
static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

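/*
 * Top-level interrupt handler, registered with IRQF_SHARED: the chip backend
 * decodes the pending events, which are then dispatched to every CRTC.
 */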
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	komeda_print_events(&evts, drm);

	/* Notify the CRTCs to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

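/* DRM driver description: atomic modesetting with CMA-backed GEM objects */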
static const struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose			= drm_fb_helper_lastclose,
	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

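/*
 * Custom atomic commit tail: program disables, plane updates and enables,
 * signal hw_done, then wait for the flip to complete before cleaning up the
 * old plane state. dma-fence signalling annotations mark the critical section.
 */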
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

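/*
 * Insert a plane state into the zorder list, keeping the list sorted by
 * increasing zpos. Returns -EINVAL if two planes request the same zpos.
 */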
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted by increasing zpos, so if the list is empty or
	 * the zpos of the new node is bigger than that of the last node,
	 * there is no need to loop: just insert the new node at the tail.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Keep the list sorted by increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support assigning the same zpos to
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

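/*
 * Recompute normalized_zpos for every plane on the CRTC: all affected planes
 * are pulled into the atomic state, sorted by zpos and numbered in order.
 * A plane using layer_split consumes two slots (left and right layer), and
 * the highest zorder used by a slave plane is recorded in the CRTC state.
 */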
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build the list in order of increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane is handled by two
		 * separate komeda layers (left/right), which may need two
		 * zorders:
		 * - zorder: used by the left layer for the left display part.
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* Calculate the maximum zorder used by slave planes */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

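/*
 * Atomic check: run the core modeset checks, then pull every plane on each
 * affected CRTC into the state and normalize the zpos values before the
 * per-plane checks run.
 */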
static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate its resource assumptions in every
	 * commit, so all affected planes (even unchanged ones) have to be
	 * added to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create		= komeda_fb_create,
	.atomic_check		= komeda_kms_check,
	.atomic_commit		= drm_atomic_helper_commit,
};

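/*
 * Initialize the mode config: set up the CRTCs for the komeda device and
 * apply the supported resolution range and komeda mode-config callbacks.
 */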
static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* Get value from dev */
	config->min_width	= 0;
	config->min_height	= 0;
	config->max_width	= 4096;
	config->max_height	= 4096;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

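/*
 * Create and register the KMS device on top of a komeda_dev: allocate the
 * drm_device, add planes, CRTCs, writeback connectors and private objects,
 * bind the sub-components, request the shared IRQ and register with DRM.
 * On failure everything set up so far is torn down and an ERR_PTR is
 * returned.
 */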
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms;
	struct drm_device *drm;
	int err;

	kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
				 struct komeda_kms_dev, base);
	if (IS_ERR(kms))
		return kms;

	drm = &kms->base;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	return ERR_PTR(err);
}

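/*
 * Tear down the KMS device: unregister from DRM, stop polling, shut down the
 * hardware via the atomic helpers and release everything set up by
 * komeda_kms_attach().
 */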
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
}