// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
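
/*
 * Illustrative arithmetic (hypothetical numbers, not taken from this
 * driver): with a carveout at priv->vram.paddr == 0x80000000, 4K pages
 * (PAGE_SHIFT == 12), and a vram_node allocated at start == 16 (in units
 * of pages), physaddr() returns (16 << 12) + 0x80000000 == 0x80010000.
 */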

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, undo the dma_map_sg()
			 * done in get_pages(), since display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}
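
/*
 * Illustrative sketch of a hypothetical caller (not code from this file):
 * pin the backing pages, use the page array / sg-table, then drop the
 * (currently no-op) pin with msm_gem_put_pages():
 *
 *	struct page **pages = msm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	... access pages[] ...
 *	msm_gem_put_pages(obj);
 */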

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}
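
/*
 * Illustrative flow (a sketch of the usual DRM pattern, not code from
 * this driver): the fake offset returned above is handed to userspace,
 * which passes it back as the offset argument of mmap() on the DRM fd:
 *
 *	off = msm_gem_mmap_offset(obj);                      // kernel side
 *	map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, drm_fd, off);                 // userspace
 */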

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
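
/*
 * Illustrative pairing (hypothetical caller, names assumed): a
 * successful get-and-pin should eventually be balanced by an unpin:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU/display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */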

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}
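
/*
 * Illustrative dumb-buffer sequence (a sketch of the generic DRM/KMS
 * userspace flow, not code from this file): create a dumb BO, fetch its
 * fake mmap offset, then map it; the two ioctls land in
 * msm_gem_dumb_create() and msm_gem_dumb_map_offset() above:
 *
 *	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
 *	struct drm_mode_map_dumb mreq;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	mreq.handle = creq.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, fd, mreq.offset);
 */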

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
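
/*
 * Illustrative pairing (hypothetical caller): because vmap_count is
 * refcounted, every successful msm_gem_get_vaddr() needs a matching
 * msm_gem_put_vaddr():
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */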

/* Update madvise status; returns true if the object has not been
 * purged, else false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
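
/*
 * Illustrative use (sketch; the real caller is the driver's GEM_MADVISE
 * ioctl path): userspace marks an idle buffer purgeable, and the return
 * value reports whether the contents were still retained:
 *
 *	retained = msm_gem_madvise(obj, MSM_MADV_DONTNEED);
 *	if (!retained)
 *		; // pages were already purged; contents are gone
 */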

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, since we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
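
/*
 * Note (editorial summary, not in the original source): the expected
 * caller of msm_gem_purge() is the shrinker (msm_gem_shrinker.c), which
 * invokes it under struct_mutex for objects previously marked
 * MSM_MADV_DONTNEED via msm_gem_madvise() above.
 */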

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
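
/*
 * In short (descriptive note, not from the original source): with no
 * shared fences present, only the exclusive (writer) fence is waited on;
 * an exclusive submit additionally drains all shared (reader) fences.
 * A minimal sketch of a hypothetical caller:
 *
 *	ret = msm_gem_sync_object(obj, fctx, is_write);
 *	if (ret)
 *		return ret;	// interrupted, or a fence error
 */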

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true,  remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
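
/*
 * Illustrative pairing (hypothetical caller, mirroring the CPU_PREP /
 * CPU_FINI ioctls): wait for pending GPU access before touching the
 * buffer from the CPU, then signal completion:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads/writes the buffer ...
 *	msm_gem_cpu_fini(obj);
 */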

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
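
/*
 * Illustrative lifecycle (hypothetical caller): msm_gem_kernel_new()
 * bundles allocate + pin + vmap, and msm_gem_kernel_put() undoes all
 * three:
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... fill ptr, hand iova to the hardware ...
 *	msm_gem_kernel_put(bo, aspace, false);
 */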

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
1198