// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include <linux/firmware.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"

#define MT_CMD_HDR_LEN			4

#define MT_FCE_DMA_ADDR			0x0230
#define MT_FCE_DMA_LEN			0x0234

#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX	0x09a8

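/*
 * Decode an MCU register-read response into the pending reg_pair array.
 * In burst mode the payload is a plain run of 32-bit values starting at
 * the first requested register; otherwise it is a list of address/value
 * pairs that must match the registers that were requested.
 */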
static void
mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) -
			      usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}

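/*
 * Poll the command-response endpoint for up to five attempts, looking for
 * an EVT_CMD_DONE event that carries the expected sequence number. URB
 * timeouts are retried; any pending register-read payload is decoded on
 * the way.
 */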
static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq)
{
	struct mt76_usb *usb = &dev->usb;
	u8 *data = usb->mcu.data;
	int i, len, ret;
	u32 rxfce;

	for (i = 0; i < 5; i++) {
		ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len,
				     300, MT_EP_IN_CMD_RESP);
		if (ret == -ETIMEDOUT)
			continue;
		if (ret)
			goto out;

		if (usb->mcu.rp)
			mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8);

		rxfce = get_unaligned_le32(data);
		if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) &&
		    FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE)
			return 0;

		dev_err(dev->dev, "error: MCU resp evt:%lx seq:%hhx-%lx\n",
			FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce),
			seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce));
	}
out:
	dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret);
	return ret;
}

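/*
 * Send a single MCU command skb: tag it with a non-zero sequence number
 * when a response is expected, prepend the USB DMA info header and push
 * it out the in-band command endpoint. Callers hold the MCU mutex.
 */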
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	u8 seq = 0;
	u32 info;
	int ret;

	if (test_bit(MT76_REMOVED, &dev->phy.state))
		return 0;

	if (wait_resp) {
		seq = ++dev->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++dev->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500,
			     MT_EP_OUT_INBAND_CMD);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}

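/*
 * mcu_send_msg hook: copy the payload into an skb and serialize the
 * command submission with the MCU mutex.
 */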
static int
mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
		      int len, bool wait_resp)
{
	struct sk_buff *skb;
	int err;

	skb = mt76_mcu_msg_alloc(dev, data, len);
	if (!skb)
		return -ENOMEM;

	mutex_lock(&dev->mcu.mutex);
	err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
	mutex_unlock(&dev->mcu.mutex);

	return err;
}

static inline void skb_put_le32(struct sk_buff *skb, u32 val)
{
	put_unaligned_le32(val, skb_put(skb, 4));
}

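/*
 * Write register/value pairs with the CMD_RANDOM_WRITE command, splitting
 * the list into chunks that fit one in-band packet and waiting for a
 * response only on the final chunk.
 */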
static int
mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
		   const struct mt76_reg_pair *data, int n)
{
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	const int CMD_RANDOM_WRITE = 12;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);
	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
	mutex_unlock(&dev->mcu.mutex);
	if (ret)
		return ret;

	return mt76x02u_mcu_wr_rp(dev, base, data + cnt, n - cnt);
}

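/*
 * Read a list of registers with the CMD_RANDOM_READ command. The whole
 * request must fit in one in-band packet; the response is decoded by
 * mt76x02u_multiple_mcu_reads() while usb->mcu.rp is set.
 */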
static int
mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
		   struct mt76_reg_pair *data, int n)
{
	const int CMD_RANDOM_READ = 10;
	const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
	struct mt76_usb *usb = &dev->usb;
	struct sk_buff *skb;
	int cnt, i, ret;

	if (!n)
		return 0;

	cnt = min(max_vals_per_cmd, n);
	if (cnt != n)
		return -EINVAL;

	skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, MT_DMA_HDR_LEN);

	for (i = 0; i < cnt; i++) {
		skb_put_le32(skb, base + data[i].reg);
		skb_put_le32(skb, data[i].value);
	}

	mutex_lock(&dev->mcu.mutex);

	usb->mcu.rp = data;
	usb->mcu.rp_len = n;
	usb->mcu.base = base;
	usb->mcu.burst = false;

	ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_READ, true);

	usb->mcu.rp = NULL;

	mutex_unlock(&dev->mcu.mutex);

	return ret;
}

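/* Reset the MCU firmware through the MT_VEND_DEV_MODE vendor request. */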
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
	mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
			     USB_DIR_OUT | USB_TYPE_VENDOR,
			     0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);

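/*
 * Upload one firmware chunk: build the DMA info header around the chunk,
 * program the FCE target address and length, send the buffer over the
 * in-band command endpoint and bump the FCE descriptor index.
 */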
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
			    const void *fw_data, int len, u32 dst_addr)
{
	__le32 info;
	u32 val;
	int err, data_len;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	data_len = MT_CMD_HDR_LEN + len + sizeof(info);

	err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000,
			     MT_EP_OUT_INBAND_CMD);
	if (err) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
		return err;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}

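/*
 * Stream a firmware image to the MCU in max_payload-sized pieces, leaving
 * room in each chunk for the DMA header and trailing padding.
 */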
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
			      int data_len, u32 max_payload, u32 offset)
{
	int len, err = 0, pos = 0, max_len = max_payload - 8;
	u8 *buf;

	buf = kmalloc(max_payload, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (data_len > 0) {
		len = min_t(int, data_len, max_len);
		err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
						  len, offset + pos);
		if (err < 0)
			break;

		data_len -= len;
		pos += len;
		usleep_range(5000, 10000);
	}
	kfree(buf);

	return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);

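/* Hook up the USB MCU ops for mt76x02 devices. */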
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
	static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
		.headroom = MT_CMD_HDR_LEN,
		.tailroom = 8,
		.mcu_send_msg = mt76x02u_mcu_send_msg,
		.mcu_wr_rp = mt76x02u_mcu_wr_rp,
		.mcu_rd_rp = mt76x02u_mcu_rd_rp,
	};

	dev->mcu_ops = &mt76x02u_mcu_ops;
}
EXPORT_SYMBOL_GPL(mt76x02u_init_mcu);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");