xref: /linux/drivers/dma/dmaengine.c (revision 6ed7ffddcf61f668114edb676417e5fb33773b59)
1 /*
2  * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the Free
6  * Software Foundation; either version 2 of the License, or (at your option)
7  * any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59
16  * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
17  *
18  * The full GNU General Public License is included in this distribution in the
19  * file called COPYING.
20  */
21 
22 /*
23  * This code implements the DMA subsystem. It provides a HW-neutral interface
24  * for other kernel code to use asynchronous memory copy capabilities,
25  * if present, and allows different HW DMA drivers to register as providing
26  * this capability.
27  *
28  * Because we are accelerating what is already a relatively fast
29  * operation, the code goes to great lengths to avoid additional overhead,
30  * such as locking.
31  *
32  * LOCKING:
33  *
34  * The subsystem keeps a global list of dma_device structs, which is protected by a
35  * mutex, dma_list_mutex.
36  *
37  * A subsystem can get access to a channel by calling dmaengine_get() followed
38  * by dma_find_channel(), or, if it needs an exclusive channel, it can call
39  * dma_request_channel().  Once a channel is allocated a reference is taken
40  * against its corresponding driver to prevent its removal.
41  *
42  * Each device has a channels list, which runs unlocked but is never modified
43  * once the device is registered; it is simply set up by the driver.
44  *
45  * See Documentation/dmaengine.txt for more details
46  */
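/*
 * A minimal sketch of the shared-channel model described above, assuming a
 * mem-to-mem offload client (the my_client_* names are illustrative).
 * dmaengine_get() takes a reference on every public channel so providers
 * cannot be unloaded while the client is active; dmaengine_put() drops it:
 *
 *	static int __init my_client_init(void)
 *	{
 *		dmaengine_get();
 *		return 0;
 *	}
 *
 *	static void __exit my_client_exit(void)
 *	{
 *		dmaengine_put();
 *	}
 *
 * Individual operations then look up a channel with dma_find_channel() or
 * claim one exclusively with dma_request_channel(); see below.
 */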
47 
48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49 
50 #include <linux/dma-mapping.h>
51 #include <linux/init.h>
52 #include <linux/module.h>
53 #include <linux/mm.h>
54 #include <linux/device.h>
55 #include <linux/dmaengine.h>
56 #include <linux/hardirq.h>
57 #include <linux/spinlock.h>
58 #include <linux/percpu.h>
59 #include <linux/rcupdate.h>
60 #include <linux/mutex.h>
61 #include <linux/jiffies.h>
62 #include <linux/rculist.h>
63 #include <linux/idr.h>
64 #include <linux/slab.h>
65 #include <linux/of_dma.h>
66 
67 static DEFINE_MUTEX(dma_list_mutex);
68 static DEFINE_IDR(dma_idr);
69 static LIST_HEAD(dma_device_list);
70 static long dmaengine_ref_count;
71 
72 /* --- sysfs implementation --- */
73 
74 /**
75  * dev_to_dma_chan - convert a device pointer to its sysfs container object
76  * @dev: device node
77  *
78  * Must be called under dma_list_mutex
79  */
80 static struct dma_chan *dev_to_dma_chan(struct device *dev)
81 {
82 	struct dma_chan_dev *chan_dev;
83 
84 	chan_dev = container_of(dev, typeof(*chan_dev), device);
85 	return chan_dev->chan;
86 }
87 
88 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
89 {
90 	struct dma_chan *chan;
91 	unsigned long count = 0;
92 	int i;
93 	int err;
94 
95 	mutex_lock(&dma_list_mutex);
96 	chan = dev_to_dma_chan(dev);
97 	if (chan) {
98 		for_each_possible_cpu(i)
99 			count += per_cpu_ptr(chan->local, i)->memcpy_count;
100 		err = sprintf(buf, "%lu\n", count);
101 	} else
102 		err = -ENODEV;
103 	mutex_unlock(&dma_list_mutex);
104 
105 	return err;
106 }
107 
108 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
109 				      char *buf)
110 {
111 	struct dma_chan *chan;
112 	unsigned long count = 0;
113 	int i;
114 	int err;
115 
116 	mutex_lock(&dma_list_mutex);
117 	chan = dev_to_dma_chan(dev);
118 	if (chan) {
119 		for_each_possible_cpu(i)
120 			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
121 		err = sprintf(buf, "%lu\n", count);
122 	} else
123 		err = -ENODEV;
124 	mutex_unlock(&dma_list_mutex);
125 
126 	return err;
127 }
128 
129 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
130 {
131 	struct dma_chan *chan;
132 	int err;
133 
134 	mutex_lock(&dma_list_mutex);
135 	chan = dev_to_dma_chan(dev);
136 	if (chan)
137 		err = sprintf(buf, "%d\n", chan->client_count);
138 	else
139 		err = -ENODEV;
140 	mutex_unlock(&dma_list_mutex);
141 
142 	return err;
143 }
144 
145 static struct device_attribute dma_attrs[] = {
146 	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
147 	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
148 	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
149 	__ATTR_NULL
150 };
151 
152 static void chan_dev_release(struct device *dev)
153 {
154 	struct dma_chan_dev *chan_dev;
155 
156 	chan_dev = container_of(dev, typeof(*chan_dev), device);
157 	if (atomic_dec_and_test(chan_dev->idr_ref)) {
158 		mutex_lock(&dma_list_mutex);
159 		idr_remove(&dma_idr, chan_dev->dev_id);
160 		mutex_unlock(&dma_list_mutex);
161 		kfree(chan_dev->idr_ref);
162 	}
163 	kfree(chan_dev);
164 }
165 
166 static struct class dma_devclass = {
167 	.name		= "dma",
168 	.dev_attrs	= dma_attrs,
169 	.dev_release	= chan_dev_release,
170 };
171 
172 /* --- client and device registration --- */
173 
174 #define dma_device_satisfies_mask(device, mask) \
175 	__dma_device_satisfies_mask((device), &(mask))
176 static int
177 __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
178 {
179 	dma_cap_mask_t has;
180 
181 	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
182 		DMA_TX_TYPE_END);
183 	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
184 }
185 
186 static struct module *dma_chan_to_owner(struct dma_chan *chan)
187 {
188 	return chan->device->dev->driver->owner;
189 }
190 
191 /**
192  * balance_ref_count - catch up the channel reference count
193  * @chan: channel to balance ->client_count versus dmaengine_ref_count
194  *
195  * balance_ref_count must be called under dma_list_mutex
196  */
197 static void balance_ref_count(struct dma_chan *chan)
198 {
199 	struct module *owner = dma_chan_to_owner(chan);
200 
201 	while (chan->client_count < dmaengine_ref_count) {
202 		__module_get(owner);
203 		chan->client_count++;
204 	}
205 }
206 
207 /**
208  * dma_chan_get - try to grab a dma channel's parent driver module
209  * @chan: channel to grab
210  *
211  * Must be called under dma_list_mutex
212  */
213 static int dma_chan_get(struct dma_chan *chan)
214 {
215 	int err = -ENODEV;
216 	struct module *owner = dma_chan_to_owner(chan);
217 
218 	if (chan->client_count) {
219 		__module_get(owner);
220 		err = 0;
221 	} else if (try_module_get(owner))
222 		err = 0;
223 
224 	if (err == 0)
225 		chan->client_count++;
226 
227 	/* allocate upon first client reference */
228 	if (chan->client_count == 1 && err == 0) {
229 		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
230 
231 		if (desc_cnt < 0) {
232 			err = desc_cnt;
233 			chan->client_count = 0;
234 			module_put(owner);
235 		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
236 			balance_ref_count(chan);
237 	}
238 
239 	return err;
240 }
241 
242 /**
243  * dma_chan_put - drop a reference to a dma channel's parent driver module
244  * @chan: channel to release
245  *
246  * Must be called under dma_list_mutex
247  */
248 static void dma_chan_put(struct dma_chan *chan)
249 {
250 	if (!chan->client_count)
251 		return; /* this channel failed alloc_chan_resources */
252 	chan->client_count--;
253 	module_put(dma_chan_to_owner(chan));
254 	if (chan->client_count == 0)
255 		chan->device->device_free_chan_resources(chan);
256 }
257 
258 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
259 {
260 	enum dma_status status;
261 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
262 
263 	dma_async_issue_pending(chan);
264 	do {
265 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
266 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
267 			pr_err("%s: timeout!\n", __func__);
268 			return DMA_ERROR;
269 		}
270 		if (status != DMA_IN_PROGRESS)
271 			break;
272 		cpu_relax();
273 	} while (1);
274 
275 	return status;
276 }
277 EXPORT_SYMBOL(dma_sync_wait);
278 
279 /**
280  * dma_cap_mask_all - enable iteration over all operation types
281  */
282 static dma_cap_mask_t dma_cap_mask_all;
283 
284 /**
285  * dma_chan_tbl_ent - tracks channel allocations per core/operation
286  * @chan: associated channel for this entry
287  */
288 struct dma_chan_tbl_ent {
289 	struct dma_chan *chan;
290 };
291 
292 /**
293  * channel_table - percpu lookup table for memory-to-memory offload providers
294  */
295 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
296 
297 static int __init dma_channel_table_init(void)
298 {
299 	enum dma_transaction_type cap;
300 	int err = 0;
301 
302 	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
303 
304 	/* 'interrupt', 'private', and 'slave' are channel capabilities,
305 	 * but are not associated with an operation so they do not need
306 	 * an entry in the channel_table
307 	 */
308 	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
309 	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
310 	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
311 
312 	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
313 		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
314 		if (!channel_table[cap]) {
315 			err = -ENOMEM;
316 			break;
317 		}
318 	}
319 
320 	if (err) {
321 		pr_err("initialization failure\n");
322 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
323 			if (channel_table[cap])
324 				free_percpu(channel_table[cap]);
325 	}
326 
327 	return err;
328 }
329 arch_initcall(dma_channel_table_init);
330 
331 /**
332  * dma_find_channel - find a channel to carry out the operation
333  * @tx_type: transaction type
334  */
335 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
336 {
337 	return this_cpu_read(channel_table[tx_type]->chan);
338 }
339 EXPORT_SYMBOL(dma_find_channel);
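/*
 * A minimal fast-path sketch, assuming the caller already holds a
 * dmaengine_get() reference (dst, src and len are illustrative).  The lookup
 * is a lock-free per-cpu read and may return NULL when no public memcpy
 * channel is available:
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	else
 *		memcpy(dst, src, len);
 */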
340 
341 /*
342  * net_dma_find_channel - find a channel for net_dma
343  * net_dma requires a channel that supports byte-aligned copies
344  */
345 struct dma_chan *net_dma_find_channel(void)
346 {
347 	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
348 	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
349 		return NULL;
350 
351 	return chan;
352 }
353 EXPORT_SYMBOL(net_dma_find_channel);
354 
355 /**
356  * dma_issue_pending_all - flush all pending operations across all channels
357  */
358 void dma_issue_pending_all(void)
359 {
360 	struct dma_device *device;
361 	struct dma_chan *chan;
362 
363 	rcu_read_lock();
364 	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
365 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
366 			continue;
367 		list_for_each_entry(chan, &device->channels, device_node)
368 			if (chan->client_count)
369 				device->device_issue_pending(chan);
370 	}
371 	rcu_read_unlock();
372 }
373 EXPORT_SYMBOL(dma_issue_pending_all);
374 
375 /**
376  * nth_chan - returns the nth channel of the given capability
377  * @cap: capability to match
378  * @n: nth channel desired
379  *
380  * Defaults to returning the channel with the desired capability and the
381  * lowest reference count when 'n' cannot be satisfied.  Must be called
382  * under dma_list_mutex.
383  */
384 static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
385 {
386 	struct dma_device *device;
387 	struct dma_chan *chan;
388 	struct dma_chan *ret = NULL;
389 	struct dma_chan *min = NULL;
390 
391 	list_for_each_entry(device, &dma_device_list, global_node) {
392 		if (!dma_has_cap(cap, device->cap_mask) ||
393 		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
394 			continue;
395 		list_for_each_entry(chan, &device->channels, device_node) {
396 			if (!chan->client_count)
397 				continue;
398 			if (!min)
399 				min = chan;
400 			else if (chan->table_count < min->table_count)
401 				min = chan;
402 
403 			if (n-- == 0) {
404 				ret = chan;
405 				break; /* done */
406 			}
407 		}
408 		if (ret)
409 			break; /* done */
410 	}
411 
412 	if (!ret)
413 		ret = min;
414 
415 	if (ret)
416 		ret->table_count++;
417 
418 	return ret;
419 }
420 
421 /**
422  * dma_channel_rebalance - redistribute the available channels
423  *
424  * Optimize for cpu isolation (each cpu gets a dedicated channel for an
425  * operation type) in the SMP case, and operation isolation (avoid
426  * multi-tasking channels) in the non-SMP case.  Must be called under
427  * dma_list_mutex.
428  */
429 static void dma_channel_rebalance(void)
430 {
431 	struct dma_chan *chan;
432 	struct dma_device *device;
433 	int cpu;
434 	int cap;
435 	int n;
436 
437 	/* undo the last distribution */
438 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
439 		for_each_possible_cpu(cpu)
440 			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
441 
442 	list_for_each_entry(device, &dma_device_list, global_node) {
443 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
444 			continue;
445 		list_for_each_entry(chan, &device->channels, device_node)
446 			chan->table_count = 0;
447 	}
448 
449 	/* don't populate the channel_table if no clients are available */
450 	if (!dmaengine_ref_count)
451 		return;
452 
453 	/* redistribute available channels */
454 	n = 0;
455 	for_each_dma_cap_mask(cap, dma_cap_mask_all)
456 		for_each_online_cpu(cpu) {
457 			if (num_possible_cpus() > 1)
458 				chan = nth_chan(cap, n++);
459 			else
460 				chan = nth_chan(cap, -1);
461 
462 			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
463 		}
464 }
465 
466 static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
467 					  dma_filter_fn fn, void *fn_param)
468 {
469 	struct dma_chan *chan;
470 
471 	if (!__dma_device_satisfies_mask(dev, mask)) {
472 		pr_debug("%s: wrong capabilities\n", __func__);
473 		return NULL;
474 	}
475 	/* devices with multiple channels need special handling as we need to
476 	 * ensure that all channels are either private or public.
477 	 */
478 	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
479 		list_for_each_entry(chan, &dev->channels, device_node) {
480 			/* some channels are already publicly allocated */
481 			if (chan->client_count)
482 				return NULL;
483 		}
484 
485 	list_for_each_entry(chan, &dev->channels, device_node) {
486 		if (chan->client_count) {
487 			pr_debug("%s: %s busy\n",
488 				 __func__, dma_chan_name(chan));
489 			continue;
490 		}
491 		if (fn && !fn(chan, fn_param)) {
492 			pr_debug("%s: %s filter said false\n",
493 				 __func__, dma_chan_name(chan));
494 			continue;
495 		}
496 		return chan;
497 	}
498 
499 	return NULL;
500 }
501 
502 /**
503  * dma_request_channel - try to allocate an exclusive channel
504  * @mask: capabilities that the channel must satisfy
505  * @fn: optional callback used to select from the available channels
506  * @fn_param: opaque parameter to pass to dma_filter_fn
507  */
508 struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
509 {
510 	struct dma_device *device, *_d;
511 	struct dma_chan *chan = NULL;
512 	int err;
513 
514 	/* Find a channel */
515 	mutex_lock(&dma_list_mutex);
516 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
517 		chan = private_candidate(mask, device, fn, fn_param);
518 		if (chan) {
519 			/* Found a suitable channel, try to grab, prep, and
520 			 * return it.  We first set DMA_PRIVATE to disable
521 			 * balance_ref_count as this channel will not be
522 			 * published in the general-purpose allocator
523 			 */
524 			dma_cap_set(DMA_PRIVATE, device->cap_mask);
525 			device->privatecnt++;
526 			err = dma_chan_get(chan);
527 
528 			if (err == -ENODEV) {
529 				pr_debug("%s: %s module removed\n",
530 					 __func__, dma_chan_name(chan));
531 				list_del_rcu(&device->global_node);
532 			} else if (err)
533 				pr_debug("%s: failed to get %s: (%d)\n",
534 					 __func__, dma_chan_name(chan), err);
535 			else
536 				break;
537 			if (--device->privatecnt == 0)
538 				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
539 			chan = NULL;
540 		}
541 	}
542 	mutex_unlock(&dma_list_mutex);
543 
544 	pr_debug("%s: %s (%s)\n",
545 		 __func__,
546 		 chan ? "success" : "fail",
547 		 chan ? dma_chan_name(chan) : NULL);
548 
549 	return chan;
550 }
551 EXPORT_SYMBOL_GPL(__dma_request_channel);
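/*
 * A minimal exclusive-allocation sketch; my_filter() and my_dev are
 * illustrative.  The filter runs under dma_list_mutex for each idle
 * candidate channel and returns true to claim it:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *
 * The channel, if one was granted, is owned exclusively until it is handed
 * back with dma_release_channel().
 */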
552 
553 /**
554  * dma_request_slave_channel - try to allocate an exclusive slave channel
555  * @dev:	pointer to client device structure
556  * @name:	slave channel name
557  */
558 struct dma_chan *dma_request_slave_channel(struct device *dev, char *name)
559 {
560 	/* If device-tree is present get slave info from here */
561 	if (dev->of_node)
562 		return of_dma_request_slave_channel(dev->of_node, name);
563 
564 	return NULL;
565 }
566 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
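/*
 * A minimal slave-usage sketch for a device-tree probed client; the "tx"
 * channel name, my_fifo_phys, buf_dma and buf_len are illustrative and the
 * config values depend on the peripheral:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= my_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */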
567 
568 void dma_release_channel(struct dma_chan *chan)
569 {
570 	mutex_lock(&dma_list_mutex);
571 	WARN_ONCE(chan->client_count != 1,
572 		  "chan reference count %d != 1\n", chan->client_count);
573 	dma_chan_put(chan);
574 	/* drop PRIVATE cap enabled by __dma_request_channel() */
575 	if (--chan->device->privatecnt == 0)
576 		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
577 	mutex_unlock(&dma_list_mutex);
578 }
579 EXPORT_SYMBOL_GPL(dma_release_channel);
580 
581 /**
582  * dmaengine_get - register interest in dma_channels
583  */
584 void dmaengine_get(void)
585 {
586 	struct dma_device *device, *_d;
587 	struct dma_chan *chan;
588 	int err;
589 
590 	mutex_lock(&dma_list_mutex);
591 	dmaengine_ref_count++;
592 
593 	/* try to grab channels */
594 	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
595 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
596 			continue;
597 		list_for_each_entry(chan, &device->channels, device_node) {
598 			err = dma_chan_get(chan);
599 			if (err == -ENODEV) {
600 				/* module removed before we could use it */
601 				list_del_rcu(&device->global_node);
602 				break;
603 			} else if (err)
604 				pr_debug("%s: failed to get %s: (%d)\n",
605 				       __func__, dma_chan_name(chan), err);
606 		}
607 	}
608 
609 	/* if this is the first reference and there were channels
610 	 * waiting we need to rebalance to get those channels
611 	 * incorporated into the channel table
612 	 */
613 	if (dmaengine_ref_count == 1)
614 		dma_channel_rebalance();
615 	mutex_unlock(&dma_list_mutex);
616 }
617 EXPORT_SYMBOL(dmaengine_get);
618 
619 /**
620  * dmaengine_put - let dma drivers be removed when ref_count == 0
621  */
622 void dmaengine_put(void)
623 {
624 	struct dma_device *device;
625 	struct dma_chan *chan;
626 
627 	mutex_lock(&dma_list_mutex);
628 	dmaengine_ref_count--;
629 	BUG_ON(dmaengine_ref_count < 0);
630 	/* drop channel references */
631 	list_for_each_entry(device, &dma_device_list, global_node) {
632 		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
633 			continue;
634 		list_for_each_entry(chan, &device->channels, device_node)
635 			dma_chan_put(chan);
636 	}
637 	mutex_unlock(&dma_list_mutex);
638 }
639 EXPORT_SYMBOL(dmaengine_put);
640 
641 static bool device_has_all_tx_types(struct dma_device *device)
642 {
643 	/* A device that satisfies this test has channels that will never cause
644 	 * an async_tx channel switch event as all possible operation types can
645 	 * be handled.
646 	 */
647 	#ifdef CONFIG_ASYNC_TX_DMA
648 	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
649 		return false;
650 	#endif
651 
652 	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
653 	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
654 		return false;
655 	#endif
656 
657 	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
658 	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
659 		return false;
660 	#endif
661 
662 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
663 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
664 		return false;
665 
666 	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
667 	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
668 		return false;
669 	#endif
670 	#endif
671 
672 	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
673 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
674 		return false;
675 
676 	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
677 	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
678 		return false;
679 	#endif
680 	#endif
681 
682 	return true;
683 }
684 
685 static int get_dma_id(struct dma_device *device)
686 {
687 	int rc;
688 
689 	mutex_lock(&dma_list_mutex);
690 
691 	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
692 	if (rc >= 0)
693 		device->dev_id = rc;
694 
695 	mutex_unlock(&dma_list_mutex);
696 	return rc < 0 ? rc : 0;
697 }
698 
699 /**
700  * dma_async_device_register - register a DMA device and its channels
701  * @device: &dma_device
702  */
703 int dma_async_device_register(struct dma_device *device)
704 {
705 	int chancnt = 0, rc;
706 	struct dma_chan* chan;
707 	atomic_t *idr_ref;
708 
709 	if (!device)
710 		return -ENODEV;
711 
712 	/* validate device routines */
713 	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
714 		!device->device_prep_dma_memcpy);
715 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
716 		!device->device_prep_dma_xor);
717 	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
718 		!device->device_prep_dma_xor_val);
719 	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
720 		!device->device_prep_dma_pq);
721 	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
722 		!device->device_prep_dma_pq_val);
723 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
724 		!device->device_prep_dma_memset);
725 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
726 		!device->device_prep_dma_interrupt);
727 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
728 		!device->device_prep_dma_sg);
729 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
730 		!device->device_prep_dma_cyclic);
731 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
732 		!device->device_control);
733 	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
734 		!device->device_prep_interleaved_dma);
735 
736 	BUG_ON(!device->device_alloc_chan_resources);
737 	BUG_ON(!device->device_free_chan_resources);
738 	BUG_ON(!device->device_tx_status);
739 	BUG_ON(!device->device_issue_pending);
740 	BUG_ON(!device->dev);
741 
742 	/* note: this only matters in the
743 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
744 	 */
745 	if (device_has_all_tx_types(device))
746 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
747 
748 	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
749 	if (!idr_ref)
750 		return -ENOMEM;
751 	rc = get_dma_id(device);
752 	if (rc != 0) {
753 		kfree(idr_ref);
754 		return rc;
755 	}
756 
757 	atomic_set(idr_ref, 0);
758 
759 	/* represent channels in sysfs. Probably want devs too */
760 	list_for_each_entry(chan, &device->channels, device_node) {
761 		rc = -ENOMEM;
762 		chan->local = alloc_percpu(typeof(*chan->local));
763 		if (chan->local == NULL)
764 			goto err_out;
765 		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
766 		if (chan->dev == NULL) {
767 			free_percpu(chan->local);
768 			chan->local = NULL;
769 			goto err_out;
770 		}
771 
772 		chan->chan_id = chancnt++;
773 		chan->dev->device.class = &dma_devclass;
774 		chan->dev->device.parent = device->dev;
775 		chan->dev->chan = chan;
776 		chan->dev->idr_ref = idr_ref;
777 		chan->dev->dev_id = device->dev_id;
778 		atomic_inc(idr_ref);
779 		dev_set_name(&chan->dev->device, "dma%dchan%d",
780 			     device->dev_id, chan->chan_id);
781 
782 		rc = device_register(&chan->dev->device);
783 		if (rc) {
784 			free_percpu(chan->local);
785 			chan->local = NULL;
786 			kfree(chan->dev);
787 			atomic_dec(idr_ref);
788 			goto err_out;
789 		}
790 		chan->client_count = 0;
791 	}
792 	device->chancnt = chancnt;
793 
794 	mutex_lock(&dma_list_mutex);
795 	/* take references on public channels */
796 	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
797 		list_for_each_entry(chan, &device->channels, device_node) {
798 			/* if clients are already waiting for channels we need
799 			 * to take references on their behalf
800 			 */
801 			if (dma_chan_get(chan) == -ENODEV) {
802 				/* note we can only get here for the first
803 				 * channel as the remaining channels are
804 				 * guaranteed to get a reference
805 				 */
806 				rc = -ENODEV;
807 				mutex_unlock(&dma_list_mutex);
808 				goto err_out;
809 			}
810 		}
811 	list_add_tail_rcu(&device->global_node, &dma_device_list);
812 	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
813 		device->privatecnt++;	/* Always private */
814 	dma_channel_rebalance();
815 	mutex_unlock(&dma_list_mutex);
816 
817 	return 0;
818 
819 err_out:
820 	/* if we never registered a channel just release the idr */
821 	if (atomic_read(idr_ref) == 0) {
822 		mutex_lock(&dma_list_mutex);
823 		idr_remove(&dma_idr, device->dev_id);
824 		mutex_unlock(&dma_list_mutex);
825 		kfree(idr_ref);
826 		return rc;
827 	}
828 
829 	list_for_each_entry(chan, &device->channels, device_node) {
830 		if (chan->local == NULL)
831 			continue;
832 		mutex_lock(&dma_list_mutex);
833 		chan->dev->chan = NULL;
834 		mutex_unlock(&dma_list_mutex);
835 		device_unregister(&chan->dev->device);
836 		free_percpu(chan->local);
837 	}
838 	return rc;
839 }
840 EXPORT_SYMBOL(dma_async_device_register);
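/*
 * A minimal provider-side sketch: a driver with a single memcpy-capable
 * channel (all my_* identifiers are illustrative).  The channels list must
 * be fully populated before registration, as it is never modified again:
 *
 *	dma_cap_set(DMA_MEMCPY, my_dma->cap_mask);
 *	my_dma->device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dma->device_free_chan_resources = my_free_chan_resources;
 *	my_dma->device_prep_dma_memcpy = my_prep_memcpy;
 *	my_dma->device_tx_status = my_tx_status;
 *	my_dma->device_issue_pending = my_issue_pending;
 *	my_dma->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&my_dma->channels);
 *	my_chan->device = my_dma;
 *	list_add_tail(&my_chan->device_node, &my_dma->channels);
 *
 *	err = dma_async_device_register(my_dma);
 */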
841 
842 /**
843  * dma_async_device_unregister - unregister a DMA device
844  * @device: &dma_device
845  *
846  * This routine is called by dma driver exit routines; dmaengine holds module
847  * references to prevent it from being called while channels are in use.
848  */
849 void dma_async_device_unregister(struct dma_device *device)
850 {
851 	struct dma_chan *chan;
852 
853 	mutex_lock(&dma_list_mutex);
854 	list_del_rcu(&device->global_node);
855 	dma_channel_rebalance();
856 	mutex_unlock(&dma_list_mutex);
857 
858 	list_for_each_entry(chan, &device->channels, device_node) {
859 		WARN_ONCE(chan->client_count,
860 			  "%s called while %d clients hold a reference\n",
861 			  __func__, chan->client_count);
862 		mutex_lock(&dma_list_mutex);
863 		chan->dev->chan = NULL;
864 		mutex_unlock(&dma_list_mutex);
865 		device_unregister(&chan->dev->device);
866 		free_percpu(chan->local);
867 	}
868 }
869 EXPORT_SYMBOL(dma_async_device_unregister);
870 
871 /**
872  * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
873  * @chan: DMA channel to offload copy to
874  * @dest: destination address (virtual)
875  * @src: source address (virtual)
876  * @len: length
877  *
878  * Both @dest and @src must be mappable to a bus address according to the
879  * DMA mapping API rules for streaming mappings.
880  * Both @dest and @src must stay memory resident (kernel memory or locked
881  * user space pages).
882  */
883 dma_cookie_t
884 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
885 			void *src, size_t len)
886 {
887 	struct dma_device *dev = chan->device;
888 	struct dma_async_tx_descriptor *tx;
889 	dma_addr_t dma_dest, dma_src;
890 	dma_cookie_t cookie;
891 	unsigned long flags;
892 
893 	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
894 	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
895 	flags = DMA_CTRL_ACK |
896 		DMA_COMPL_SRC_UNMAP_SINGLE |
897 		DMA_COMPL_DEST_UNMAP_SINGLE;
898 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
899 
900 	if (!tx) {
901 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
902 		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
903 		return -ENOMEM;
904 	}
905 
906 	tx->callback = NULL;
907 	cookie = tx->tx_submit(tx);
908 
909 	preempt_disable();
910 	__this_cpu_add(chan->local->bytes_transferred, len);
911 	__this_cpu_inc(chan->local->memcpy_count);
912 	preempt_enable();
913 
914 	return cookie;
915 }
916 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
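/*
 * A minimal synchronous-usage sketch (my_dst, my_src and my_len are
 * illustrative).  The returned cookie is negative on failure; dma_sync_wait()
 * issues the pending work and polls for completion:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, my_dst, my_src, my_len);
 *	if (cookie < 0)
 *		return cookie;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */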
917 
918 /**
919  * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
920  * @chan: DMA channel to offload copy to
921  * @page: destination page
922  * @offset: offset in page to copy to
923  * @kdata: source address (virtual)
924  * @len: length
925  *
926  * Both @page/@offset and @kdata must be mappable to a bus address according
927  * to the DMA mapping API rules for streaming mappings.
928  * Both @page/@offset and @kdata must stay memory resident (kernel memory or
929  * locked user space pages)
930  */
931 dma_cookie_t
932 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
933 			unsigned int offset, void *kdata, size_t len)
934 {
935 	struct dma_device *dev = chan->device;
936 	struct dma_async_tx_descriptor *tx;
937 	dma_addr_t dma_dest, dma_src;
938 	dma_cookie_t cookie;
939 	unsigned long flags;
940 
941 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
942 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
943 	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
944 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
945 
946 	if (!tx) {
947 		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
948 		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
949 		return -ENOMEM;
950 	}
951 
952 	tx->callback = NULL;
953 	cookie = tx->tx_submit(tx);
954 
955 	preempt_disable();
956 	__this_cpu_add(chan->local->bytes_transferred, len);
957 	__this_cpu_inc(chan->local->memcpy_count);
958 	preempt_enable();
959 
960 	return cookie;
961 }
962 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
963 
964 /**
965  * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
966  * @chan: DMA channel to offload copy to
967  * @dest_pg: destination page
968  * @dest_off: offset in page to copy to
969  * @src_pg: source page
970  * @src_off: offset in page to copy from
971  * @len: length
972  *
973  * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
974  * address according to the DMA mapping API rules for streaming mappings.
975  * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
976  * (kernel memory or locked user space pages).
977  */
978 dma_cookie_t
979 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
980 	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
981 	size_t len)
982 {
983 	struct dma_device *dev = chan->device;
984 	struct dma_async_tx_descriptor *tx;
985 	dma_addr_t dma_dest, dma_src;
986 	dma_cookie_t cookie;
987 	unsigned long flags;
988 
989 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
990 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
991 				DMA_FROM_DEVICE);
992 	flags = DMA_CTRL_ACK;
993 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
994 
995 	if (!tx) {
996 		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
997 		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
998 		return -ENOMEM;
999 	}
1000 
1001 	tx->callback = NULL;
1002 	cookie = tx->tx_submit(tx);
1003 
1004 	preempt_disable();
1005 	__this_cpu_add(chan->local->bytes_transferred, len);
1006 	__this_cpu_inc(chan->local->memcpy_count);
1007 	preempt_enable();
1008 
1009 	return cookie;
1010 }
1011 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
1012 
1013 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1014 	struct dma_chan *chan)
1015 {
1016 	tx->chan = chan;
1017 	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1018 	spin_lock_init(&tx->lock);
1019 	#endif
1020 }
1021 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1022 
1023 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1024  * @tx: in-flight transaction to wait on
1025  */
1026 enum dma_status
1027 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1028 {
1029 	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1030 
1031 	if (!tx)
1032 		return DMA_SUCCESS;
1033 
1034 	while (tx->cookie == -EBUSY) {
1035 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1036 			pr_err("%s timeout waiting for descriptor submission\n",
1037 			       __func__);
1038 			return DMA_ERROR;
1039 		}
1040 		cpu_relax();
1041 	}
1042 	return dma_sync_wait(tx->chan, tx->cookie);
1043 }
1044 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1045 
1046 /* dma_run_dependencies - helper routine for dma drivers to process
1047  *	(start) dependent operations on their target channel
1048  * @tx: transaction with dependencies
1049  */
1050 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1051 {
1052 	struct dma_async_tx_descriptor *dep = txd_next(tx);
1053 	struct dma_async_tx_descriptor *dep_next;
1054 	struct dma_chan *chan;
1055 
1056 	if (!dep)
1057 		return;
1058 
1059 	/* we'll submit tx->next now, so clear the link */
1060 	txd_clear_next(tx);
1061 	chan = dep->chan;
1062 
1063 	/* keep submitting up until a channel switch is detected
1064 	 * in that case we will be called again as a result of
1065 	 * processing the interrupt from async_tx_channel_switch
1066 	 */
1067 	for (; dep; dep = dep_next) {
1068 		txd_lock(dep);
1069 		txd_clear_parent(dep);
1070 		dep_next = txd_next(dep);
1071 		if (dep_next && dep_next->chan == chan)
1072 			txd_clear_next(dep); /* ->next will be submitted */
1073 		else
1074 			dep_next = NULL; /* submit current dep and terminate */
1075 		txd_unlock(dep);
1076 
1077 		dep->tx_submit(dep);
1078 	}
1079 
1080 	chan->device->device_issue_pending(chan);
1081 }
1082 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1083 
1084 static int __init dma_bus_init(void)
1085 {
1086 	return class_register(&dma_devclass);
1087 }
1088 arch_initcall(dma_bus_init);
1089 
1090 
1091