/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"

/* DMA Registers */
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384
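/*
 * Worked example of the mode bits above (illustrative, not part of the
 * driver): for a single-buffer 60-byte transmit packet, cpdma_chan_submit()
 * below builds the mode word as
 *
 *	CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP | 60 == 0xe000003c
 *
 * handing the descriptor over to the hardware. When directed to port 1,
 * CPDMA_DESC_TO_PORT_EN and (1 << CPDMA_TO_PORT_SHIFT) are OR'ed in too.
 */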
struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	void			*sw_token;
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct device		*dev;
	struct gen_pool		*gen_pool;
};

enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			chan_num;
};

struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};

struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};

#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		__raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	__raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	__raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	__raw_writel((u32)(v), &(desc)->fld)
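/*
 * Example of the channel numbering scheme above (illustrative, assuming
 * CPDMA_MAX_CHANNELS is 8): tx channel 2 lives at channels[tx_chan_num(2)]
 * == channels[2], while rx channel 2 lives at channels[rx_chan_num(2)] ==
 * channels[10]; chan_linear() masks either index back to hardware
 * channel 2.
 */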
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zu != avail %zu",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));
	if (pool->cpumap)
		dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
	else
		iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
		       int size, int align)
{
	struct cpdma_desc_pool *pool;
	int ret;

	pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;

	pool->dev	= dev;
	pool->mem_size	= size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc), align);
	pool->num_desc	= size / pool->desc_size;

	pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
					      "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		dev_err(dev, "pool create failed %ld\n",
			PTR_ERR(pool->gen_pool));
		goto gen_pool_create_fail;
	}

	if (phys) {
		pool->phys  = phys;
		pool->iomap = ioremap(phys, size); /* should be memremap? */
		pool->hw_addr = hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
						  GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return pool;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
	return NULL;
}

static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;
	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
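/*
 * Worked example of the translation above (hypothetical addresses): with
 * pool->iomap == 0xd0800000 and pool->hw_addr == 0x4a102000, a descriptor
 * at iomap offset 0x40 yields desc_phys() == 0x4a102040, which is the
 * value the hardware expects in hw_next and in the HDP register;
 * desc_from_phys() performs the inverse mapping.
 */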
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}

static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info;
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	/* validate the index before using it to address controls[] */
	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	info = &controls[control];
	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}

static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info;
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	/* validate the index before using it to address controls[] */
	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	info = &controls[control];
	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
	return ret;
}
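/*
 * Example (illustrative): _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1)
 * read-modify-writes bit 0 of CPDMA_DMACONTROL, while
 * _cpdma_control_get(ctlr, CPDMA_STAT_IDLE) returns bit 31 of
 * CPDMA_DMASTATUS. Both require has_ext_regs and an active controller.
 */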
/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}

static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}

/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if the ordering constraint is violated.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 1;
	int i;

	*prio_mode = 0;
	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan) {
			rlim = 0;
			continue;
		}

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			if (rlim) {
				new_rmask |= chan->mask;
			} else {
				ch->rate = old_rate;
				dev_err(ctlr->dev,
					"previous channel of ch%d is not rate limited\n",
					chan->chan_num);
				return -EINVAL;
			}
		} else {
			*prio_mode = 1;
			rlim = 0;
		}
	}

	*rmask = new_rmask;
	return 0;
}
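/*
 * Example of the ordering rule enforced above (illustrative): with tx
 * channels 0..2 allocated, rate limiting channels 0 and 1 yields
 * rmask == 0x3 and prio_mode == 1. Trying to rate limit channel 2 while
 * channel 1 is unlimited fails with -EINVAL: rate-limited channels must
 * form a contiguous run starting at channel 0.
 */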
static int cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 dividend, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		dividend = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		dividend = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(dividend, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		dividend = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(dividend, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
	return 0;
}

struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	ctlr->chan_num = 0;
	spin_lock_init(&ctlr->lock);

	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
					    ctlr->params.desc_mem_phys,
					    ctlr->params.desc_hw_addr,
					    ctlr->params.desc_mem_size,
					    ctlr->params.desc_align);
	if (!ctlr->pool)
		return NULL;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);

int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		__raw_writel(0, ctlr->params.txhdp + 4 * i);
		__raw_writel(0, ctlr->params.rxhdp + 4 * i);
		__raw_writel(0, ctlr->params.txcp + 4 * i);
		__raw_writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* switch to fixed prio mode if any tx channel is
			 * not rate limited; prio mode stays off only when
			 * all tx channels are rate limited
			 */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
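/*
 * Typical bring-up sequence (an illustrative sketch of how callers such as
 * the cpsw driver use this API; tx_handler/rx_handler are hypothetical
 * callbacks and error handling is omitted):
 *
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&params);
 *	struct cpdma_chan *txch = cpdma_chan_create(dma, 0, tx_handler, 0);
 *	struct cpdma_chan *rxch = cpdma_chan_create(dma, 0, rx_handler, 1);
 *
 *	cpdma_ctlr_start(dma);	(also switches on the channels created above)
 *	cpdma_ctlr_int_ctrl(dma, true);
 */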
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr->pool);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i, reg;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
	dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);

void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* give whatever descriptors are left over to the biggest channel */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}

/**
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i, tx_num = 0;

	if (!ctlr->chan_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
			tx_num++;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
	rx_desc_num = pool->num_desc - tx_desc_num;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}
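/*
 * Worked example for the split above (illustrative numbers): with a pool
 * of 256 descriptors, one tx and one rx channel, and no weights set,
 * tx_desc_num = (1 * 256) / 2 = 128 and rx_desc_num = 128; since both
 * channels are "free" (weight == 0), each receives its full half as
 * per_ch_desc and no remainder is left to redistribute.
 */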
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights. That is 100% for RX
 * and 100% for Tx. The weight is used to split cpdma resources
 * in correct proportion required by the channels, including number
 * of descriptors. The channel rate is not enough to know the
 * weight of a channel as the maximum rate of an interface is needed.
 * If weight = 0, then the channel uses the rest of the descriptors
 * left over by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);

/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int dividend, divisor;

	dividend = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(dividend, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
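/*
 * Worked example (illustrative): for bus_freq_mhz == 100 the dividend is
 * 100 * 32 * 1000 == 3200000 and the divisor is 16385, so the minimum
 * configurable rate is DIV_ROUND_UP(3200000, 16385) == 196 Kb/s.
 */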
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be in order, beginning
 * from the lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s, if 0 - then off shaper
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags, ch_flags;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);

u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);

struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return NULL;

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->desc_num	= ctlr->pool->num_desc / 2;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;
	ctlr->chan_num++;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
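/*
 * Example (illustrative): cpdma_chan_create(ctlr, 2, handler, 1), with
 * "handler" a hypothetical callback, creates rx channel 2: it is stored at
 * channels[rx_chan_num(2)] and uses byte offset 2 * 4 into the
 * rxhdp/rxcp/rxfree register arrays. A tx channel's shaper can then be
 * configured with cpdma_chan_set_rate(); rates below
 * cpdma_chan_get_min_rate() are not attainable.
 */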
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	ctlr->chan_num--;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);

static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*prev = chan->tail;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	u32				mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}

int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	dma_addr_t			buffer;
	unsigned long			flags;
	u32				mode;
	int				ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (chan->state == CPDMA_STATE_TEARDOWN) {
		ret = -EINVAL;
		goto unlock_ret;
	}

	if (chan->count >= chan->desc_num) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	desc = cpdma_desc_alloc(ctlr->pool);
	if (!desc) {
		chan->stats.desc_alloc_fail++;
		ret = -ENOMEM;
		goto unlock_ret;
	}

	if (len < ctlr->params.min_packet_size) {
		len = ctlr->params.min_packet_size;
		chan->stats.runt_transmit_buff++;
	}

	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
	ret = dma_mapping_error(ctlr->dev, buffer);
	if (ret) {
		cpdma_desc_free(ctlr->pool, desc, 1);
		ret = -EINVAL;
		goto unlock_ret;
	}

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
	cpdma_desc_to_port(chan, mode, directed);

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_token,  token);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	__cpdma_chan_submit(chan, desc);

	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
		chan_write(chan, rxfree, 1);

	chan->count++;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
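/*
 * Submission sketch (hypothetical caller, error handling omitted): a
 * network driver's transmit path typically maps an skb onto a descriptor
 * with
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *
 * and gets the same skb back as "token" in its completion handler once
 * the hardware (or a teardown) releases the descriptor.
 */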
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	bool			free_tx_desc;
	unsigned long		flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
			gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	dma_addr_t		buff_dma;
	int			origlen;
	void			*token;

	token    = (void *)desc_read(desc, sw_token);
	buff_dma = desc_read(desc, sw_buffer);
	origlen  = desc_read(desc, sw_len);

	dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)(token, outlen, status);
}

static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr		*ctlr = chan->ctlr;
	struct cpdma_desc __iomem	*desc;
	int				status, outlen;
	int				cb_status = 0;
	struct cpdma_desc_pool		*pool = ctlr->pool;
	dma_addr_t			desc_dma;
	unsigned long			flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status	= desc_read(desc, hw_mode);
	outlen	= status & 0x7ff;	/* lower 11 bits hold the packet length */
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			    CPDMA_DESC_PORT_MASK);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if (status & CPDMA_DESC_EOQ) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
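/*
 * Completion-handling sketch (hypothetical NAPI-style poll, mirroring how
 * callers such as cpsw drive this API; CPDMA_EOI_RX comes from
 * davinci_cpdma.h): process up to "budget" descriptors, then signal end
 * of interrupt so the hardware can raise the next one:
 *
 *	num_done = cpdma_chan_process(rxch, budget);
 *	if (num_done < budget)
 *		cpdma_ctlr_eoi(dma, CPDMA_EOI_RX);
 */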
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	unsigned long		flags;
	int			ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);

int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr	*ctlr = chan->ctlr;
	struct cpdma_desc_pool	*pool = ctlr->pool;
	unsigned long		flags;
	int			ret;
	unsigned		timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");