/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when the requested buffer size is larger than
 *   totalram_pages(). Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}
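
/*
 * A minimal driver-side sketch (illustrative, not part of this
 * allocator): drivers normally reach vb2_dc_vaddr() and vb2_dc_cookie()
 * indirectly, through the vb2 core helpers, e.g.:
 *
 *	// DMA address of plane 0, backed by vb2_dc_cookie()
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 *	// kernel mapping of plane 0, backed by vb2_dc_vaddr();
 *	// may be NULL, e.g. with DMA_ATTR_NO_KERNEL_MAPPING set
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *	if (vaddr)
 *		memset(vaddr, 0, vb2_plane_size(vb, 0));
 */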

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
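
/*
 * Userspace can skip these syncs per-buffer (illustrative sketch, not
 * part of this file): if the driver sets q->allow_cache_hints, an
 * application may pass cache-management hints with each buffer, e.g.:
 *
 *	struct v4l2_buffer b = { ... };
 *
 *	b.flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;	// skip prepare() sync
 *	b.flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;	// skip finish() sync
 *	ioctl(fd, VIDIOC_QBUF, &b);
 *
 * which the vb2 core translates into the skip_cache_sync_on_prepare /
 * skip_cache_sync_on_finish checks above.
 */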

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		/* drop the reference taken by get_device() above */
		put_device(buf->dev);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/*
	 * Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior to or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return the previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map to the client with the new direction; no cache sync is
	 * required, see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	dma_resv_assert_held(dbuf->resv);

	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
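
/*
 * Exporting ends up here via VIDIOC_EXPBUF. A minimal userspace sketch
 * (illustrative, not part of this file):
 *
 *	struct v4l2_exportbuffer exp = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.flags = O_CLOEXEC | O_RDWR,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_EXPBUF, &exp) == 0)
 *		use_dmabuf_fd(exp.fd);	// hypothetical consumer
 */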

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check that the memory is
		 * physically contiguous and use direct mapping instead.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check whether the dmabuf is big enough to store a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
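
/*
 * Importing is driven from userspace with V4L2_MEMORY_DMABUF. A minimal
 * sketch (illustrative, not part of this file) of queueing an external
 * dma-buf fd, which reaches vb2_dc_attach_dmabuf()/vb2_dc_map_dmabuf()
 * through the vb2 core:
 *
 *	struct v4l2_buffer b = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_DMABUF,
 *		.index = 0,
 *		.m.fd = dmabuf_fd,	// fd exported by another device
 *	};
 *
 *	ioctl(fd, VIDIOC_QBUF, &b);
 */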

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
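
/*
 * A minimal queue setup sketch (illustrative; the "foo" names are
 * hypothetical): a driver selects this allocator by pointing its
 * vb2_queue at vb2_dma_contig_memops before calling vb2_queue_init():
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->dev = &pdev->dev;		// device used for DMA allocations
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->ops = &foo_qops;		// driver's vb2_ops
 *	q->buf_struct_size = sizeof(struct foo_buffer);
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	ret = vb2_queue_init(q);
 */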

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left at the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and that provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
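
/*
 * A typical call site sketch (illustrative): an IOMMU-backed platform
 * driver raises the segment size limit in its probe() routine, before
 * any queue is initialized:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */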

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);
869