// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"
#include "dma.h"
#include "trace.h"

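/*
 * Basic MMIO register accessors. Every access goes through the mt76
 * register tracepoints (trace_reg_rr/trace_reg_wr) so register activity
 * can be inspected at runtime.
 */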
static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
{
	u32 val;

	val = readl(dev->mmio.regs + offset);
	trace_reg_rr(dev, offset, val);

	return val;
}

static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	trace_reg_wr(dev, offset, val);
	writel(val, dev->mmio.regs + offset);
}

static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	val |= mt76_mmio_rr(dev, offset) & ~mask;
	mt76_mmio_wr(dev, offset, val);
	return val;
}

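/*
 * Bulk copy helpers: data is moved to/from device memory in 32-bit words,
 * so a length that is not a multiple of four is rounded up to the next word.
 */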
static void mt76_mmio_write_copy(struct mt76_dev *dev, u32 offset,
				 const void *data, int len)
{
	__iowrite32_copy(dev->mmio.regs + offset, data, DIV_ROUND_UP(len, 4));
}

static void mt76_mmio_read_copy(struct mt76_dev *dev, u32 offset,
				void *data, int len)
{
	__ioread32_copy(data, dev->mmio.regs + offset, DIV_ROUND_UP(len, 4));
}

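/*
 * Write/read an array of register/value pairs. The base argument required
 * by the bus interface is unused for MMIO; each pair's register offset is
 * used directly.
 */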
static int mt76_mmio_wr_rp(struct mt76_dev *dev, u32 base,
			   const struct mt76_reg_pair *data, int len)
{
	while (len > 0) {
		mt76_mmio_wr(dev, data->reg, data->value);
		data++;
		len--;
	}

	return 0;
}

static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
			   struct mt76_reg_pair *data, int len)
{
	while (len > 0) {
		data->value = mt76_mmio_rr(dev, data->reg);
		data++;
		len--;
	}

	return 0;
}

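/*
 * Update the cached interrupt mask under mmio.irq_lock. When a register
 * address is supplied, the new mask is also pushed to the hardware; if a
 * WED device is active, the mask is programmed through WED instead of a
 * direct register write.
 */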
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
		       u32 clear, u32 set)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->mmio.irq_lock, flags);
	dev->mmio.irqmask &= ~clear;
	dev->mmio.irqmask |= set;
	if (addr) {
		if (mtk_wed_device_active(&dev->mmio.wed))
			mtk_wed_device_irq_set_mask(&dev->mmio.wed,
						    dev->mmio.irqmask);
		else
			mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
	}
	spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
}
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);

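/* Helpers used when MediaTek WED (Wireless Ethernet Dispatch) offload is enabled */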
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
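/*
 * Release every rx buffer that was handed to WED: return the pages to the
 * page pool, drop the associated rx tokens and free any pending rxwi entries.
 */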
void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	int i;

	for (i = 0; i < dev->rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(dev, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
	}

	mt76_free_pending_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_release_rx_buf);

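/*
 * Fill the WED rx buffer ring: allocate a page pool buffer for each
 * descriptor, sync it for device use and store its DMA address together
 * with an rx token (plus the high DMA address bits on 64-bit DMA configs).
 * On allocation failure, everything set up so far is released again.
 */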
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
	struct mtk_wed_bm_desc *desc = wed->rx_buf_ring.desc;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, len = SKB_WITH_OVERHEAD(q->buf_size);
	struct mt76_txwi_cache *t = NULL;

	for (i = 0; i < size; i++) {
		enum dma_data_direction dir;
		dma_addr_t addr;
		u32 offset;
		int token;
		void *buf;

		t = mt76_get_rxwi(dev);
		if (!t)
			goto unmap;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			goto unmap;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		desc->buf0 = cpu_to_le32(addr);
		token = mt76_rx_token_consume(dev, buf, t, addr);
		if (token < 0) {
			mt76_put_page_pool_buf(buf, false);
			goto unmap;
		}

		token = FIELD_PREP(MT_DMA_CTL_TOKEN, token);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		token |= FIELD_PREP(MT_DMA_CTL_SDP0_H, addr >> 32);
#endif
		desc->token |= cpu_to_le32(token);
		desc++;
	}

	return 0;

unmap:
	if (t)
		mt76_put_rxwi(dev, t);
	mt76_mmio_wed_release_rx_buf(wed);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_init_rx_buf);

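/*
 * Shrink the host tx token range to the part below wed->wlan.token_start
 * and wait up to one second for outstanding tokens in the WED range to be
 * released; returns non-zero if the wait times out.
 */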
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = wed->wlan.token_start;
	spin_unlock_bh(&dev->token_lock);

	return !wait_event_timeout(dev->tx_wait, !dev->wed_token_count, HZ);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_enable);

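/* Restore the driver's default tx token range once WED offload is disabled. */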
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	spin_lock_bh(&dev->token_lock);
	dev->token_size = dev->drv->token_size;
	spin_unlock_bh(&dev->token_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_offload_disable);

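/* Wake up the driver side of the WED reset handshake. */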
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed)
{
	struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);

	complete(&dev->mmio.wed_reset_complete);
}
EXPORT_SYMBOL_GPL(mt76_mmio_wed_reset_complete);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

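/*
 * Attach the MMIO bus ops and register base to a mt76 device and initialise
 * the lock protecting the cached interrupt mask.
 */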
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
	static const struct mt76_bus_ops mt76_mmio_ops = {
		.rr = mt76_mmio_rr,
		.rmw = mt76_mmio_rmw,
		.wr = mt76_mmio_wr,
		.write_copy = mt76_mmio_write_copy,
		.read_copy = mt76_mmio_read_copy,
		.wr_rp = mt76_mmio_wr_rp,
		.rd_rp = mt76_mmio_rd_rp,
		.type = MT76_BUS_MMIO,
	};

	dev->bus = &mt76_mmio_ops;
	dev->mmio.regs = regs;

	spin_lock_init(&dev->mmio.irq_lock);
}
EXPORT_SYMBOL_GPL(mt76_mmio_init);