/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "lnbh25.h"

static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
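/*
 * When nonzero, the on-board SPI flash controller is initialized as well
 * (see the netup_spi_init() call in netup_unidvb_initdev()); the probe
 * code also forces it on when a card with old firmware is detected.
 */
MODULE_PARM_DESC(spi_enable, "Enable the on-board SPI flash controller");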

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200
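/*
 * Each DMA channel therefore streams into a ring of
 * NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT * 188 bytes
 * (8 * 128 * 188 = 192512); the two rings live back-to-back in a
 * single coherent allocation, see netup_unidvb_dma_init().
 */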

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			Bits [0-7]:	DMA packet size, 188 bytes
 *			Bits [8-15]:	packets count in block, 128 packets
 *			Bits [24-31]:	blocks count, 8 blocks
 * @timeout:		DMA timeout in units of 8ns
 *			For example, a value of 375000000 equals 3 sec
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistic register, not tested
 * @stat_pkt_accepted:	Statistic register, not tested
 * @stat_pkt_overruns:	Statistic register, not tested
 * @stat_pkt_underruns:	Statistic register, not tested
 * @stat_fifo_overruns:	Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);

struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

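/*
 * Tuner callback used by the Horus3A and Ascot2E drivers: it drives the
 * RF control GPIO of the corresponding channel, presumably switching
 * the input between the satellite and terrestrial/cable RF paths.
 */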
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;
	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* unreset frontends bits[0:1] */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
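	/*
	 * The ring address is programmed in two parts (low 30 bits in
	 * start_addr_lo, bits 30-31 via the register at offset 0x1000,
	 * see netup_unidvb_dma_init()), so the head pointer read back
	 * here apparently carries only the low bits; OR the saved high
	 * bits back in to rebuild the full bus address.
	 */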
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
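	/*
	 * Bytes produced by the card since the previous interrupt,
	 * accounting for wrap-around at the end of the ring.
	 */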
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		dev_warn_ratelimited(dev,
			"%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    const void *parg,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    void *alloc_ctxs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
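	/*
	 * One vb2 buffer carries exactly one DMA block:
	 * 128 TS packets * 188 bytes = 24064 bytes, page-aligned.
	 */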
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

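/*
 * Set up one DVB adapter per RF input.  Each adapter exposes three
 * vb2_dvb frontends backed by the same CXD2841ER demodulator: fe0 for
 * DVB-S/S2 (Horus3A tuner plus LNBH25 SEC), fe1 for DVB-T/T2 and fe2
 * for DVB-C/C2, the latter two sharing the Ascot2E tuner.
 */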
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	struct vb2_dvb_frontend *fe0, *fe1, *fe2;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);
	if (vb2_dvb_alloc_frontend(&ndev->frontends[num], 1) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 2) == NULL ||
		vb2_dvb_alloc_frontend(
			&ndev->frontends[num], 3) == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to allocate vb2_dvb_frontend\n",
			__func__);
		return -ENOMEM;
	}
	fe0 = vb2_dvb_get_frontend(&ndev->frontends[num], 1);
	fe1 = vb2_dvb_get_frontend(&ndev->frontends[num], 2);
	fe2 = vb2_dvb_get_frontend(&ndev->frontends[num], 3);
	if (fe0 == NULL || fe1 == NULL || fe2 == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): frontends have not been allocated\n", __func__);
		return -EINVAL;
	}
	netup_unidvb_queue_init(&ndev->dma[num], &fe0->dvb.dvbq);
	netup_unidvb_queue_init(&ndev->dma[num], &fe1->dvb.dvbq);
	netup_unidvb_queue_init(&ndev->dma[num], &fe2->dvb.dvbq);
	fe0->dvb.name = "netup_fe0";
	fe1->dvb.name = "netup_fe1";
	fe2->dvb.name = "netup_fe2";
	fe0->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fe0->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}
	horus3a_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(horus3a_attach, fe0->dvb.frontend,
			&horus3a_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	if (!dvb_attach(lnbh25_attach, fe0->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}
	/* DVB-T/T2 frontend */
	fe1->dvb.frontend = dvb_attach(cxd2841er_attach_t,
		&demod_config, &ndev->i2c[num].adap);
	if (fe1->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T frontend\n", __func__);
		goto frontend_detach;
	}
	fe1->dvb.frontend->id = 1;
	ascot2e_conf.set_tuner_priv = &ndev->dma[num];
	if (!dvb_attach(ascot2e_attach, fe1->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T tuner frontend\n",
			__func__);
		goto frontend_detach;
	}
	/* DVB-C/C2 frontend */
	fe2->dvb.frontend = dvb_attach(cxd2841er_attach_c,
				&demod_config, &ndev->i2c[num].adap);
	if (fe2->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-C frontend\n", __func__);
		goto frontend_detach;
	}
	fe2->dvb.frontend->id = 2;
	if (!dvb_attach(ascot2e_attach, fe2->dvb.frontend,
			&ascot2e_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-T/C tuner frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
			THIS_MODULE, NULL,
			&ndev->pci_dev->dev, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

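/*
 * Copy pending bytes from the DMA ring into a vb2 buffer.  The copy is
 * done in at most two chunks: first from the current offset to the end
 * of the ring (when the data wraps), then from the start of the ring.
 */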
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p,
			(u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p,
			(u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			v4l2_get_timestamp(&buf->vb.timestamp);
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
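	/* Drop whatever was not consumed above so that the next
	 * interrupt starts with fresh ring accounting. */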
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

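/*
 * Timer armed in netup_unidvb_buf_queue(): if it is not re-armed by
 * another queued buffer within about a second, all pending buffers are
 * returned to userspace with VB2_BUF_STATE_ERROR.
 */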
static void netup_unidvb_dma_timeout(unsigned long data)
{
	struct netup_dma *dma = (struct netup_dma *)data;
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
		    (unsigned long)dma);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
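	/*
	 * Program the ring geometry: blocks count in bits [24-31],
	 * packets per block in bits [8-15] and the TS packet size
	 * (188 bytes) in bits [0-7].
	 */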
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

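/*
 * dvb_attach() does not load frontend modules by itself, so try to load
 * the demod/tuner/SEC drivers up front.  Failures are only logged, as
 * the drivers may well be built into the kernel.
 */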
static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;
	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x\n",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, 0xf, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d\n",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name     = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe    = netup_unidvb_initdev,
	.remove   = netup_unidvb_finidev,
	.suspend  = NULL,
	.resume   = NULL,
};

static int __init netup_unidvb_init(void)
{
	return pci_register_driver(&netup_unidvb_pci_driver);
}

static void __exit netup_unidvb_fini(void)
{
	pci_unregister_driver(&netup_unidvb_pci_driver);
}

module_init(netup_unidvb_init);
module_exit(netup_unidvb_fini);