/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "helene.h"
#include "lnbh25.h"

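/*
 * When set, the on-board SPI flash controller is initialized as well;
 * it is forced on when an old firmware revision is detected (see
 * netup_unidvb_initdev() below).
 */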
static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base address */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			Bits [0-7]:	DMA packet size, 188 bytes
 *			Bits [16-23]:	packets count in block, 128 packets
 *			Bits [24-31]:	blocks count, 8 blocks
 * @timeout:		DMA timeout in units of 8 ns
 *			For example, a value of 375000000 equals 3 sec
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistic register, not tested
 * @stat_pkt_accepted:	Statistic register, not tested
 * @stat_pkt_overruns:	Statistic register, not tested
 * @stat_pkt_underruns:	Statistic register, not tested
 * @stat_fifo_overruns:	Statistic register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);

struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

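/*
 * Tuner control callback invoked by the demodulator drivers: switches
 * the RF input (GPIO_RFA_CTL/GPIO_RFB_CTL) of the given adapter between
 * the terrestrial/cable and satellite signal paths.
 */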
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;

	/* inverted tuner control in hw rev. 1.4 */
	if (ndev->rev == NETUP_HW_REV_1_4)
		is_dvb_tc = !is_dvb_tc;

	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

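/* Enable PCIe interrupt delivery and bring the frontends out of reset. */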
static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* pulse the reset lines: drive all GPIOs low, then raise the
	 * frontend/tuner reset and RF control bits */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

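/*
 * DMA completion interrupt: read the current hardware write pointer,
 * work out how many new bytes landed in the ring buffer and schedule
 * the worker that copies them into the queued vb2 buffers.
 */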
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
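	/* bytes written by the hardware since the last interrupt,
	 * taking ring buffer wrap-around into account */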
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		printk_ratelimited(KERN_ERR "%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

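/*
 * Top-level interrupt handler: interrupts stay masked while dispatching;
 * the FPGA status word selects exactly one sub-handler (I2C, SPI, DMA
 * or CI) per invocation.
 */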
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    struct device *alloc_devs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

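	/* single plane sized to one DMA block: 128 TS packets * 188 bytes */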
	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

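/*
 * Register one DVB adapter with two frontends sharing a CXD2841ER
 * demodulator: frontend 1 handles DVB-S/S2, frontend 2 DVB-T/T2/C.
 */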
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	int fe_count = 2;
	int i = 0;
	struct vb2_dvb_frontend *fes[2];

	if (ndev->rev == NETUP_HW_REV_1_3)
		demod_config.xtal = SONY_XTAL_20500;
	else
		demod_config.xtal = SONY_XTAL_24000;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);

	for (i = 0; i < fe_count; i++) {
		if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
				== NULL) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to allocate vb2_dvb_frontend\n",
					__func__);
			return -ENOMEM;
		}
	}

	for (i = 0; i < fe_count; i++) {
		fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
		if (fes[i] == NULL) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): frontend %d has not been allocated\n",
				__func__, i + 1);
			return -EINVAL;
		}
	}

	for (i = 0; i < fe_count; i++) {
		netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
		fes[i]->dvb.name = (i == 0) ? "netup_fe0" : "netup_fe1";
	}

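	/* DVB-S/S2 frontend */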
	fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[0]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (ndev->rev == NETUP_HW_REV_1_3) {
		horus3a_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
					&horus3a_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}

	/* DVB-T/T2 frontend */
	fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[1]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach Ter frontend\n", __func__);
		goto frontend_detach;
	}
	fes[1]->dvb.frontend->id = 1;
	if (ndev->rev == NETUP_HW_REV_1_3) {
		ascot2e_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
					&ascot2e_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
				 THIS_MODULE, NULL,
				 &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

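/*
 * Copy TS data from the hardware ring buffer into a vb2 buffer, in at
 * most two memcpy_fromio() steps: up to the end of the ring first (if
 * the data wraps), then from the ring start.
 */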
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p,
			(u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p,
			(u8 __iomem *)(dma->addr_virt + dma->data_offset),
			copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

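/*
 * Work handler: drain the ring buffer into queued vb2 buffers and
 * complete each buffer once it holds a full block of TS packets.
 */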
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

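/*
 * Buffer timeout: fires one second after the last buf_queue (see the
 * mod_timer() call above) if the hardware produced no data in time;
 * outstanding buffers are returned in the ERROR state.
 */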
static void netup_unidvb_dma_timeout(unsigned long data)
{
	struct netup_dma *dma = (struct netup_dma *)data;
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	setup_timer(&dma->timeout, netup_unidvb_dma_timeout,
		    (unsigned long)dma);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
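	/*
	 * The ring address is split: the low 30 bits are programmed into
	 * start_addr_lo, while the top two bits of the 32-bit bus address
	 * are latched in a separate register (offset 0x1000, apparently
	 * an address-translation window) and OR-ed back into the head
	 * pointer in netup_dma_interrupt().
	 */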
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
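	/* DMA timeout, in 8 ns units: 375000000 * 8 ns = 3 s */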
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
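	/* keep both channels idle until start_streaming enables them */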
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

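/*
 * Pre-load the demod/tuner modules so the dvb_attach() calls above can
 * resolve their symbols; a load failure is only a warning since the
 * corresponding driver may be built in.
 */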
static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;

	/* detect hardware revision */
	if (pci_dev->device == NETUP_HW_REV_1_3)
		ndev->rev = NETUP_HW_REV_1_3;
	else
		ndev->rev = NETUP_HW_REV_1_4;

	dev_info(&pci_dev->dev,
		"%s(): board (0x%x) hardware revision 0x%x\n",
		__func__, pci_dev->device, ndev->rev);

	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus*10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x\n",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, 0xf, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d\n",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
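	/* one coherent buffer holding both rings:
	 * 2 channels * 8 blocks * 128 packets * 188 bytes */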
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}


static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
	{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name     = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe    = netup_unidvb_initdev,
	.remove   = netup_unidvb_finidev,
	.suspend  = NULL,
	.resume   = NULL,
};

module_pci_driver(netup_unidvb_pci_driver);
1033