xref: /linux/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c (revision eeb9f5c2dcec90009d7cf12e780e7f9631993fc5)
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/fs.h>
5 #include <linux/firmware.h>
6 #include "mt7921.h"
7 #include "mcu.h"
8 #include "../mt76_connac2_mac.h"
9 #include "../mt792x_trace.h"
10 
11 #define MT_STA_BFER			BIT(0)
12 #define MT_STA_BFEE			BIT(1)
13 
14 static bool mt7921_disable_clc;
15 module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
16 MODULE_PARM_DESC(disable_clc, "disable CLC support");
17 
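/*
 * Decode the status of a synchronous MCU command.  A missing reply (NULL
 * skb) triggers a chip reset, a sequence-number mismatch is signalled with
 * -EAGAIN so the caller keeps waiting for the matching reply, and the
 * offset/format of the returned status value depends on which command was
 * issued.
 */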
18 int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
19 			      struct sk_buff *skb, int seq)
20 {
21 	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
22 	struct mt76_connac2_mcu_rxd *rxd;
23 	int ret = 0;
24 
25 	if (!skb) {
26 		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
27 			cmd, seq);
28 		mt792x_reset(mdev);
29 
30 		return -ETIMEDOUT;
31 	}
32 
33 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
34 	if (seq != rxd->seq)
35 		return -EAGAIN;
36 
37 	if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
38 	    cmd == MCU_CMD(PATCH_FINISH_REQ)) {
39 		skb_pull(skb, sizeof(*rxd) - 4);
40 		ret = *skb->data;
41 	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
42 		skb_pull(skb, sizeof(*rxd) + 4);
43 		ret = le32_to_cpu(*(__le32 *)skb->data);
44 	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
45 		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
46 		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
47 		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
48 		   cmd == MCU_UNI_CMD(OFFLOAD) ||
49 		   cmd == MCU_UNI_CMD(SUSPEND)) {
50 		struct mt76_connac_mcu_uni_event *event;
51 
52 		skb_pull(skb, sizeof(*rxd));
53 		event = (struct mt76_connac_mcu_uni_event *)skb->data;
54 		ret = le32_to_cpu(event->status);
55 		/* skip invalid event */
56 		if (mcu_cmd != event->cid)
57 			ret = -EAGAIN;
58 	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
59 		struct mt76_connac_mcu_reg_event *event;
60 
61 		skb_pull(skb, sizeof(*rxd));
62 		event = (struct mt76_connac_mcu_reg_event *)skb->data;
63 		ret = (int)le32_to_cpu(event->val);
64 	} else {
65 		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
66 	}
67 
68 	return ret;
69 }
70 EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
71 
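/*
 * Read a single eFUSE/EEPROM byte through the MCU: the request address is
 * rounded down to an MT7921_EEPROM_BLOCK_SIZE boundary and the wanted byte
 * is picked out of the returned block.
 */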
72 static int mt7921_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
73 {
74 	struct mt7921_mcu_eeprom_info *res, req = {
75 		.addr = cpu_to_le32(round_down(offset,
76 				    MT7921_EEPROM_BLOCK_SIZE)),
77 	};
78 	struct sk_buff *skb;
79 	int ret;
80 
81 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
82 					&req, sizeof(req), true, &skb);
83 	if (ret)
84 		return ret;
85 
86 	res = (struct mt7921_mcu_eeprom_info *)skb->data;
87 	*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
88 	dev_kfree_skb(skb);
89 
90 	return 0;
91 }
92 
93 #ifdef CONFIG_PM
94 
95 static int
96 mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
97 			      struct ieee80211_vif *vif, bool suspend)
98 {
99 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
100 	struct {
101 		struct {
102 			u8 bss_idx;
103 			u8 pad[3];
104 		} __packed hdr;
105 		struct mt76_connac_arpns_tlv arpns;
106 	} req = {
107 		.hdr = {
108 			.bss_idx = mvif->mt76.idx,
109 		},
110 		.arpns = {
111 			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
112 			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
113 			.mode = suspend,
114 		},
115 	};
116 
117 	return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req),
118 				 true);
119 }
120 
121 void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
122 {
123 	if (IS_ENABLED(CONFIG_IPV6)) {
124 		struct mt76_phy *phy = priv;
125 
126 		mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
127 					      !test_bit(MT76_STATE_RUNNING,
128 					      &phy->state));
129 	}
130 
131 	mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
132 }
133 
134 #endif /* CONFIG_PM */
135 
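/*
 * Handle the unsolicited remain-on-channel grant event: notify mac80211
 * that the channel is ready (for plain ROC requests), wake up any waiter
 * on roc_wait and re-arm the ROC timer with the granted interval.
 */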
136 static void
137 mt7921_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
138 {
139 	struct mt7921_roc_grant_tlv *grant;
140 	struct mt76_connac2_mcu_rxd *rxd;
141 	int duration;
142 
143 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
144 	grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);
145 
146 	/* should never happen */
147 	WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));
148 
149 	if (grant->reqtype == MT7921_ROC_REQ_ROC)
150 		ieee80211_ready_on_channel(dev->mt76.phy.hw);
151 
152 	dev->phy.roc_grant = true;
153 	wake_up(&dev->phy.roc_wait);
154 	duration = le32_to_cpu(grant->max_interval);
155 	mod_timer(&dev->phy.roc_timer,
156 		  jiffies + msecs_to_jiffies(duration));
157 }
158 
159 static void
160 mt7921_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
161 {
162 	struct mt76_phy *mphy = &dev->mt76.phy;
163 	struct mt792x_phy *phy = mphy->priv;
164 
165 	spin_lock_bh(&dev->mt76.lock);
166 	__skb_queue_tail(&phy->scan_event_list, skb);
167 	spin_unlock_bh(&dev->mt76.lock);
168 
169 	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
170 				     MT792x_HW_SCAN_TIMEOUT);
171 }
172 
173 static void
174 mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
175 				struct ieee80211_vif *vif)
176 {
177 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
178 	struct mt76_connac_beacon_loss_event *event = priv;
179 
180 	if (mvif->idx != event->bss_idx)
181 		return;
182 
183 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
184 	    vif->type != NL80211_IFTYPE_STATION)
185 		return;
186 
187 	ieee80211_connection_loss(vif);
188 }
189 
190 static void
191 mt7921_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb)
192 {
193 	struct mt76_connac_beacon_loss_event *event;
194 	struct mt76_phy *mphy = &dev->mt76.phy;
195 
196 	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
197 	event = (struct mt76_connac_beacon_loss_event *)skb->data;
198 
199 	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
200 					IEEE80211_IFACE_ITER_RESUME_ALL,
201 					mt7921_mcu_connection_loss_iter, event);
202 }
203 
204 static void
205 mt7921_mcu_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb)
206 {
207 	struct mt7921_debug_msg {
208 		__le16 id;
209 		u8 type;
210 		u8 flag;
211 		__le32 value;
212 		__le16 len;
213 		u8 content[512];
214 	} __packed * msg;
215 
216 	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
217 	msg = (struct mt7921_debug_msg *)skb->data;
218 
219 	if (msg->type == 3) { /* fw log */
220 		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
221 		int i;
222 
223 		for (i = 0; i < len; i++) {
224 			if (!msg->content[i])
225 				msg->content[i] = ' ';
226 		}
227 		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
228 	}
229 }
230 
231 static void
232 mt7921_mcu_low_power_event(struct mt792x_dev *dev, struct sk_buff *skb)
233 {
234 	struct mt7921_mcu_lp_event {
235 		u8 state;
236 		u8 reserved[3];
237 	} __packed * event;
238 
239 	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
240 	event = (struct mt7921_mcu_lp_event *)skb->data;
241 
242 	trace_lp_event(dev, event->state);
243 }
244 
245 static void
246 mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
247 {
248 	struct mt7921_mcu_tx_done_event *event;
249 
250 	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
251 	event = (struct mt7921_mcu_tx_done_event *)skb->data;
252 
253 	mt7921_mac_add_txs(dev, event->txs);
254 }
255 
256 static void
257 mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
258 {
259 	struct mt76_connac2_mcu_rxd *rxd;
260 
261 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
262 	switch (rxd->eid) {
263 	case MCU_EVENT_BSS_BEACON_LOSS:
264 		mt7921_mcu_connection_loss_event(dev, skb);
265 		break;
266 	case MCU_EVENT_SCHED_SCAN_DONE:
267 	case MCU_EVENT_SCAN_DONE:
268 		mt7921_mcu_scan_event(dev, skb);
269 		return;
270 	case MCU_EVENT_DBG_MSG:
271 		mt7921_mcu_debug_msg_event(dev, skb);
272 		break;
273 	case MCU_EVENT_COREDUMP:
274 		dev->fw_assert = true;
275 		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
276 					       &dev->coredump);
277 		return;
278 	case MCU_EVENT_LP_INFO:
279 		mt7921_mcu_low_power_event(dev, skb);
280 		break;
281 	case MCU_EVENT_TX_DONE:
282 		mt7921_mcu_tx_done_event(dev, skb);
283 		break;
284 	default:
285 		break;
286 	}
287 	dev_kfree_skb(skb);
288 }
289 
290 static void
291 mt7921_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev,
292 				    struct sk_buff *skb)
293 {
294 	struct mt76_connac2_mcu_rxd *rxd;
295 
296 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
297 
298 	switch (rxd->eid) {
299 	case MCU_UNI_EVENT_ROC:
300 		mt7921_mcu_uni_roc_event(dev, skb);
301 		break;
302 	default:
303 		break;
304 	}
305 	dev_kfree_skb(skb);
306 }
307 
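/*
 * Dispatch incoming MCU events: unsolicited events (both uni and legacy
 * format) are consumed by the handlers above, everything else is handed to
 * the generic mt76 MCU response path.
 */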
308 void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
309 {
310 	struct mt76_connac2_mcu_rxd *rxd;
311 
312 	if (skb_linearize(skb))
313 		return;
314 
315 	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
316 
317 	if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
318 		mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
319 		return;
320 	}
321 
322 	if (rxd->eid == 0x6) {
323 		mt76_mcu_rx_event(&dev->mt76, skb);
324 		return;
325 	}
326 
327 	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
328 	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
329 	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
330 	    rxd->eid == MCU_EVENT_SCAN_DONE ||
331 	    rxd->eid == MCU_EVENT_TX_DONE ||
332 	    rxd->eid == MCU_EVENT_DBG_MSG ||
333 	    rxd->eid == MCU_EVENT_COREDUMP ||
334 	    rxd->eid == MCU_EVENT_LP_INFO ||
335 	    !rxd->seq)
336 		mt7921_mcu_rx_unsolicited_event(dev, skb);
337 	else
338 		mt76_mcu_rx_event(&dev->mt76, skb);
339 }
340 
341 /* starec & wtbl */
342 int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev,
343 			 struct ieee80211_ampdu_params *params,
344 			 bool enable)
345 {
346 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
347 
348 	if (enable && !params->amsdu)
349 		msta->wcid.amsdu = false;
350 
351 	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
352 				      MCU_UNI_CMD(STA_REC_UPDATE),
353 				      enable, true);
354 }
355 
356 int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
357 			 struct ieee80211_ampdu_params *params,
358 			 bool enable)
359 {
360 	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
361 
362 	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
363 				      MCU_UNI_CMD(STA_REC_UPDATE),
364 				      enable, false);
365 }
366 
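/*
 * Locate the CLC data in the non-download (FW_FEATURE_NON_DL) region of the
 * firmware image and cache one buffer per CLC index, so the tables can later
 * be pushed to firmware via mt7921_mcu_set_clc().  Skipped when the
 * disable_clc module parameter is set or on USB devices.
 */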
367 static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name)
368 {
369 	const struct mt76_connac2_fw_trailer *hdr;
370 	const struct mt76_connac2_fw_region *region;
371 	const struct mt7921_clc *clc;
372 	struct mt76_dev *mdev = &dev->mt76;
373 	struct mt792x_phy *phy = &dev->phy;
374 	const struct firmware *fw;
375 	int ret, i, len, offset = 0;
376 	u8 *clc_base = NULL, hw_encap = 0;
377 
378 	dev->phy.clc_chan_conf = 0xff;
379 	if (mt7921_disable_clc ||
380 	    mt76_is_usb(&dev->mt76))
381 		return 0;
382 
383 	if (mt76_is_mmio(&dev->mt76)) {
384 		ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
385 		if (ret)
386 			return ret;
387 		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
388 	}
389 
390 	ret = request_firmware(&fw, fw_name, mdev->dev);
391 	if (ret)
392 		return ret;
393 
394 	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
395 		dev_err(mdev->dev, "Invalid firmware\n");
396 		ret = -EINVAL;
397 		goto out;
398 	}
399 
400 	hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
401 	for (i = 0; i < hdr->n_region; i++) {
402 		region = (const void *)((const u8 *)hdr -
403 					(hdr->n_region - i) * sizeof(*region));
404 		len = le32_to_cpu(region->len);
405 
406 		/* check if we have a valid buffer size */
407 		if (offset + len > fw->size) {
408 			dev_err(mdev->dev, "Invalid firmware region\n");
409 			ret = -EINVAL;
410 			goto out;
411 		}
412 
413 		if ((region->feature_set & FW_FEATURE_NON_DL) &&
414 		    region->type == FW_TYPE_CLC) {
415 			clc_base = (u8 *)(fw->data + offset);
416 			break;
417 		}
418 		offset += len;
419 	}
420 
421 	if (!clc_base)
422 		goto out;
423 
424 	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
425 		clc = (const struct mt7921_clc *)(clc_base + offset);
426 
427 		/* do not init buf again if chip reset triggered */
428 		if (phy->clc[clc->idx])
429 			continue;
430 
431 		/* header content sanity */
432 		if (clc->idx == MT7921_CLC_POWER &&
433 		    u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
434 			continue;
435 
436 		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
437 						  le32_to_cpu(clc->len),
438 						  GFP_KERNEL);
439 
440 		if (!phy->clc[clc->idx]) {
441 			ret = -ENOMEM;
442 			goto out;
443 		}
444 	}
445 	ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
446 out:
447 	release_firmware(fw);
448 
449 	return ret;
450 }
451 
452 static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev,
453 					 struct sk_buff *skb)
454 {
455 	struct mt76_sdio *sdio = &dev->sdio;
456 	struct mt7921_tx_resource {
457 		__le32 version;
458 		__le32 pse_data_quota;
459 		__le32 pse_mcu_quota;
460 		__le32 ple_data_quota;
461 		__le32 ple_mcu_quota;
462 		__le16 pse_page_size;
463 		__le16 ple_page_size;
464 		u8 pp_padding;
465 		u8 pad[3];
466 	} __packed * tx_res;
467 
468 	tx_res = (struct mt7921_tx_resource *)skb->data;
469 	sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
470 	sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota);
471 	sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
472 	sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
473 	sdio->sched.deficit = tx_res->pp_padding;
474 }
475 
476 static void mt7921_mcu_parse_phy_cap(struct mt76_dev *dev,
477 				     struct sk_buff *skb)
478 {
479 	struct mt7921_phy_cap {
480 		u8 ht;
481 		u8 vht;
482 		u8 _5g;
483 		u8 max_bw;
484 		u8 nss;
485 		u8 dbdc;
486 		u8 tx_ldpc;
487 		u8 rx_ldpc;
488 		u8 tx_stbc;
489 		u8 rx_stbc;
490 		u8 hw_path;
491 		u8 he;
492 	} __packed * cap;
493 
494 	enum {
495 		WF0_24G,
496 		WF0_5G
497 	};
498 
499 	cap = (struct mt7921_phy_cap *)skb->data;
500 
501 	dev->phy.antenna_mask = BIT(cap->nss) - 1;
502 	dev->phy.chainmask = dev->phy.antenna_mask;
503 	dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
504 	dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
505 }
506 
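/*
 * Query the firmware NIC capabilities and walk the returned TLV list to
 * fill in band support, MAC address, PHY capabilities, chip capabilities
 * and (on SDIO) the TX resource quotas.
 */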
507 static int mt7921_mcu_get_nic_capability(struct mt792x_phy *mphy)
508 {
509 	struct mt76_connac_cap_hdr {
510 		__le16 n_element;
511 		u8 rsv[2];
512 	} __packed * hdr;
513 	struct sk_buff *skb;
514 	struct mt76_phy *phy = mphy->mt76;
515 	int ret, i;
516 
517 	ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB),
518 					NULL, 0, true, &skb);
519 	if (ret)
520 		return ret;
521 
522 	hdr = (struct mt76_connac_cap_hdr *)skb->data;
523 	if (skb->len < sizeof(*hdr)) {
524 		ret = -EINVAL;
525 		goto out;
526 	}
527 
528 	skb_pull(skb, sizeof(*hdr));
529 
530 	for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
531 		struct tlv_hdr {
532 			__le32 type;
533 			__le32 len;
534 		} __packed * tlv = (struct tlv_hdr *)skb->data;
535 		int len;
536 
537 		if (skb->len < sizeof(*tlv))
538 			break;
539 
540 		skb_pull(skb, sizeof(*tlv));
541 
542 		len = le32_to_cpu(tlv->len);
543 		if (skb->len < len)
544 			break;
545 
546 		switch (le32_to_cpu(tlv->type)) {
547 		case MT_NIC_CAP_6G:
548 			phy->cap.has_6ghz = skb->data[0];
549 			break;
550 		case MT_NIC_CAP_MAC_ADDR:
551 			memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN);
552 			break;
553 		case MT_NIC_CAP_PHY:
554 			mt7921_mcu_parse_phy_cap(phy->dev, skb);
555 			break;
556 		case MT_NIC_CAP_TX_RESOURCE:
557 			if (mt76_is_sdio(phy->dev))
558 				mt7921_mcu_parse_tx_resource(phy->dev,
559 							     skb);
560 			break;
561 		case MT_NIC_CAP_CHIP_CAP:
562 			memcpy(&mphy->chip_cap, (void *)skb->data, sizeof(u64));
563 			break;
564 		default:
565 			break;
566 		}
567 		skb_pull(skb, len);
568 	}
569 out:
570 	dev_kfree_skb(skb);
571 
572 	return ret;
573 }
574 
575 int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
576 {
577 	struct {
578 		u8 ctrl_val;
579 		u8 pad[3];
580 	} data = {
581 		.ctrl_val = ctrl
582 	};
583 
584 	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
585 				 &data, sizeof(data), false);
586 }
587 
588 int mt7921_run_firmware(struct mt792x_dev *dev)
589 {
590 	int err;
591 
592 	err = mt792x_load_firmware(dev);
593 	if (err)
594 		return err;
595 
596 	err = mt7921_mcu_get_nic_capability(&dev->phy);
597 	if (err)
598 		return err;
599 
600 	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
601 	err = mt7921_load_clc(dev, mt792x_ram_name(dev));
602 	if (err)
603 		return err;
604 
605 	return mt7921_mcu_fw_log_2_host(dev, 1);
606 }
607 EXPORT_SYMBOL_GPL(mt7921_run_firmware);
608 
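/*
 * Program the per-AC EDCA parameters (and, for interfaces with HE support,
 * the MU EDCA parameters) into firmware.  to_aci[] remaps mac80211 AC
 * numbering to the AC ordering the firmware expects.
 */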
609 int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
610 {
611 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
612 	struct edca {
613 		__le16 cw_min;
614 		__le16 cw_max;
615 		__le16 txop;
616 		__le16 aifs;
617 		u8 guardtime;
618 		u8 acm;
619 	} __packed;
620 	struct mt7921_mcu_tx {
621 		struct edca edca[IEEE80211_NUM_ACS];
622 		u8 bss_idx;
623 		u8 qos;
624 		u8 wmm_idx;
625 		u8 pad;
626 	} __packed req = {
627 		.bss_idx = mvif->mt76.idx,
628 		.qos = vif->bss_conf.qos,
629 		.wmm_idx = mvif->mt76.wmm_idx,
630 	};
631 	struct mu_edca {
632 		u8 cw_min;
633 		u8 cw_max;
634 		u8 aifsn;
635 		u8 acm;
636 		u8 timer;
637 		u8 padding[3];
638 	};
639 	struct mt7921_mcu_mu_tx {
640 		u8 ver;
641 		u8 pad0;
642 		__le16 len;
643 		u8 bss_idx;
644 		u8 qos;
645 		u8 wmm_idx;
646 		u8 pad1;
647 		struct mu_edca edca[IEEE80211_NUM_ACS];
648 		u8 pad3[32];
649 	} __packed req_mu = {
650 		.bss_idx = mvif->mt76.idx,
651 		.qos = vif->bss_conf.qos,
652 		.wmm_idx = mvif->mt76.wmm_idx,
653 	};
654 	static const int to_aci[] = { 1, 0, 2, 3 };
655 	int ac, ret;
656 
657 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
658 		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
659 		struct edca *e = &req.edca[to_aci[ac]];
660 
661 		e->aifs = cpu_to_le16(q->aifs);
662 		e->txop = cpu_to_le16(q->txop);
663 
664 		if (q->cw_min)
665 			e->cw_min = cpu_to_le16(q->cw_min);
666 		else
667 			e->cw_min = cpu_to_le16(5);
668 
669 		if (q->cw_max)
670 			e->cw_max = cpu_to_le16(q->cw_max);
671 		else
672 			e->cw_max = cpu_to_le16(10);
673 	}
674 
675 	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
676 				sizeof(req), false);
677 	if (ret)
678 		return ret;
679 
680 	if (!vif->bss_conf.he_support)
681 		return 0;
682 
683 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
684 		struct ieee80211_he_mu_edca_param_ac_rec *q;
685 		struct mu_edca *e;
686 
687 		if (!mvif->queue_params[ac].mu_edca)
688 			break;
689 
690 		q = &mvif->queue_params[ac].mu_edca_param_rec;
691 		e = &(req_mu.edca[to_aci[ac]]);
692 
693 		e->cw_min = q->ecw_min_max & 0xf;
694 		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
695 		e->aifsn = q->aifsn;
696 		e->timer = q->mu_edca_timer;
697 	}
698 
699 	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
700 				 &req_mu, sizeof(req_mu), false);
701 }
702 
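/*
 * Request a remain-on-channel period from firmware.  The grant is reported
 * back asynchronously through the UNI_EVENT_ROC_GRANT event handled in
 * mt7921_mcu_uni_roc_event().
 */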
703 int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
704 		       struct ieee80211_channel *chan, int duration,
705 		       enum mt7921_roc_req type, u8 token_id)
706 {
707 	int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
708 	struct mt792x_dev *dev = phy->dev;
709 	struct {
710 		struct {
711 			u8 rsv[4];
712 		} __packed hdr;
713 		struct roc_acquire_tlv {
714 			__le16 tag;
715 			__le16 len;
716 			u8 bss_idx;
717 			u8 tokenid;
718 			u8 control_channel;
719 			u8 sco;
720 			u8 band;
721 			u8 bw;
722 			u8 center_chan;
723 			u8 center_chan2;
724 			u8 bw_from_ap;
725 			u8 center_chan_from_ap;
726 			u8 center_chan2_from_ap;
727 			u8 reqtype;
728 			__le32 maxinterval;
729 			u8 dbdcband;
730 			u8 rsv[3];
731 		} __packed roc;
732 	} __packed req = {
733 		.roc = {
734 			.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
735 			.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
736 			.tokenid = token_id,
737 			.reqtype = type,
738 			.maxinterval = cpu_to_le32(duration),
739 			.bss_idx = vif->mt76.idx,
740 			.control_channel = chan->hw_value,
741 			.bw = CMD_CBW_20MHZ,
742 			.bw_from_ap = CMD_CBW_20MHZ,
743 			.center_chan = center_ch,
744 			.center_chan_from_ap = center_ch,
745 			.dbdcband = 0xff, /* auto */
746 		},
747 	};
748 
749 	if (chan->hw_value < center_ch)
750 		req.roc.sco = 1; /* SCA */
751 	else if (chan->hw_value > center_ch)
752 		req.roc.sco = 3; /* SCB */
753 
754 	switch (chan->band) {
755 	case NL80211_BAND_6GHZ:
756 		req.roc.band = 3;
757 		break;
758 	case NL80211_BAND_5GHZ:
759 		req.roc.band = 2;
760 		break;
761 	default:
762 		req.roc.band = 1;
763 		break;
764 	}
765 
766 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
767 				 &req, sizeof(req), false);
768 }
769 
770 int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
771 			 u8 token_id)
772 {
773 	struct mt792x_dev *dev = phy->dev;
774 	struct {
775 		struct {
776 			u8 rsv[4];
777 		} __packed hdr;
778 		struct roc_abort_tlv {
779 			__le16 tag;
780 			__le16 len;
781 			u8 bss_idx;
782 			u8 tokenid;
783 			u8 dbdcband;
784 			u8 rsv[5];
785 		} __packed abort;
786 	} __packed req = {
787 		.abort = {
788 			.tag = cpu_to_le16(UNI_ROC_ABORT),
789 			.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
790 			.tokenid = token_id,
791 			.bss_idx = vif->mt76.idx,
792 			.dbdcband = 0xff, /* auto */
793 		},
794 	};
795 
796 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
797 				 &req, sizeof(req), false);
798 }
799 
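/*
 * Push the current operating channel to firmware.  switch_reason
 * distinguishes normal switches, off-channel scans (which skip DPD
 * calibration) and DFS channels; rx_streams carries the antenna mask,
 * except for MCU_EXT_CMD(CHANNEL_SWITCH) where it is converted to a
 * stream count.
 */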
800 int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
801 {
802 	struct mt792x_dev *dev = phy->dev;
803 	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
804 	int freq1 = chandef->center_freq1;
805 	struct {
806 		u8 control_ch;
807 		u8 center_ch;
808 		u8 bw;
809 		u8 tx_streams_num;
810 		u8 rx_streams;	/* mask or num */
811 		u8 switch_reason;
812 		u8 band_idx;
813 		u8 center_ch2;	/* for 80+80 only */
814 		__le16 cac_case;
815 		u8 channel_band;
816 		u8 rsv0;
817 		__le32 outband_freq;
818 		u8 txpower_drop;
819 		u8 ap_bw;
820 		u8 ap_center_ch;
821 		u8 rsv1[57];
822 	} __packed req = {
823 		.control_ch = chandef->chan->hw_value,
824 		.center_ch = ieee80211_frequency_to_channel(freq1),
825 		.bw = mt76_connac_chan_bw(chandef),
826 		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
827 		.rx_streams = phy->mt76->antenna_mask,
828 		.band_idx = phy != &dev->phy,
829 	};
830 
831 	if (chandef->chan->band == NL80211_BAND_6GHZ)
832 		req.channel_band = 2;
833 	else
834 		req.channel_band = chandef->chan->band;
835 
836 	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
837 	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
838 		req.switch_reason = CH_SWITCH_NORMAL;
839 	else if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
840 		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
841 	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
842 					  NL80211_IFTYPE_AP))
843 		req.switch_reason = CH_SWITCH_DFS;
844 	else
845 		req.switch_reason = CH_SWITCH_NORMAL;
846 
847 	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
848 		req.rx_streams = hweight8(req.rx_streams);
849 
850 	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
851 		int freq2 = chandef->center_freq2;
852 
853 		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
854 	}
855 
856 	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
857 }
858 
859 int mt7921_mcu_set_eeprom(struct mt792x_dev *dev)
860 {
861 	struct req_hdr {
862 		u8 buffer_mode;
863 		u8 format;
864 		__le16 len;
865 	} __packed req = {
866 		.buffer_mode = EE_MODE_EFUSE,
867 		.format = EE_FORMAT_WHOLE,
868 	};
869 
870 	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
871 				 &req, sizeof(req), true);
872 }
873 EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
874 
875 int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
876 {
877 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
878 	struct {
879 		struct {
880 			u8 bss_idx;
881 			u8 pad[3];
882 		} __packed hdr;
883 		struct ps_tlv {
884 			__le16 tag;
885 			__le16 len;
886 			u8 ps_state; /* 0: device awake
887 				      * 1: static power save
888 				      * 2: dynamic power saving
889 				      * 3: enter TWT power saving
890 				      * 4: leave TWT power saving
891 				      */
892 			u8 pad[3];
893 		} __packed ps;
894 	} __packed ps_req = {
895 		.hdr = {
896 			.bss_idx = mvif->mt76.idx,
897 		},
898 		.ps = {
899 			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
900 			.len = cpu_to_le16(sizeof(struct ps_tlv)),
901 			.ps_state = vif->cfg.ps ? 2 : 0,
902 		},
903 	};
904 
905 	if (vif->type != NL80211_IFTYPE_STATION)
906 		return -EOPNOTSUPP;
907 
908 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
909 				 &ps_req, sizeof(ps_req), true);
910 }
911 
912 static int
913 mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
914 			 bool enable)
915 {
916 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
917 	struct {
918 		struct {
919 			u8 bss_idx;
920 			u8 pad[3];
921 		} __packed hdr;
922 		struct bcnft_tlv {
923 			__le16 tag;
924 			__le16 len;
925 			__le16 bcn_interval;
926 			u8 dtim_period;
927 			u8 pad;
928 		} __packed bcnft;
929 	} __packed bcnft_req = {
930 		.hdr = {
931 			.bss_idx = mvif->mt76.idx,
932 		},
933 		.bcnft = {
934 			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
935 			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
936 			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
937 			.dtim_period = vif->bss_conf.dtim_period,
938 		},
939 	};
940 
941 	if (vif->type != NL80211_IFTYPE_STATION)
942 		return 0;
943 
944 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
945 				 &bcnft_req, sizeof(bcnft_req), true);
946 }
947 
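/*
 * Re-announce the BSS power-save context: the BSS is first aborted and,
 * when enable is set, registered again as connected with the current AID,
 * DTIM period and beacon interval.
 */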
948 int
949 mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
950 		      bool enable)
951 {
952 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
953 	struct {
954 		u8 bss_idx;
955 		u8 dtim_period;
956 		__le16 aid;
957 		__le16 bcn_interval;
958 		__le16 atim_window;
959 		u8 uapsd;
960 		u8 bmc_delivered_ac;
961 		u8 bmc_triggered_ac;
962 		u8 pad;
963 	} req = {
964 		.bss_idx = mvif->mt76.idx,
965 		.aid = cpu_to_le16(vif->cfg.aid),
966 		.dtim_period = vif->bss_conf.dtim_period,
967 		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
968 	};
969 	struct {
970 		u8 bss_idx;
971 		u8 pad[3];
972 	} req_hdr = {
973 		.bss_idx = mvif->mt76.idx,
974 	};
975 	int err;
976 
977 	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
978 				&req_hdr, sizeof(req_hdr), false);
979 	if (err < 0 || !enable)
980 		return err;
981 
982 	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
983 				 &req, sizeof(req), false);
984 }
985 
986 int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
987 			  struct ieee80211_vif *vif, bool enable,
988 			  enum mt76_sta_info_state state)
989 {
990 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
991 	int rssi = -ewma_rssi_read(&mvif->rssi);
992 	struct mt76_sta_cmd_info info = {
993 		.sta = sta,
994 		.vif = vif,
995 		.enable = enable,
996 		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
997 		.state = state,
998 		.offload_fw = true,
999 		.rcpi = to_rcpi(rssi),
1000 	};
1001 	struct mt792x_sta *msta;
1002 
1003 	msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
1004 	info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
1005 	info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
1006 
1007 	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
1008 }
1009 
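/*
 * Toggle firmware beacon filtering for a station interface: enabling pushes
 * the beacon interval/DTIM info to firmware and sets the RX filter bit that
 * drops beacons from other BSSes; disabling aborts the BSS PM state and
 * clears that filter bit again.
 */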
1010 int mt7921_mcu_set_beacon_filter(struct mt792x_dev *dev,
1011 				 struct ieee80211_vif *vif,
1012 				 bool enable)
1013 {
1014 #define MT7921_FIF_BIT_CLR		BIT(1)
1015 #define MT7921_FIF_BIT_SET		BIT(0)
1016 	int err;
1017 
1018 	if (enable) {
1019 		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
1020 		if (err)
1021 			return err;
1022 
1023 		err = mt7921_mcu_set_rxfilter(dev, 0,
1024 					      MT7921_FIF_BIT_SET,
1025 					      MT_WF_RFCR_DROP_OTHER_BEACON);
1026 		if (err)
1027 			return err;
1028 
1029 		return 0;
1030 	}
1031 
1032 	err = mt7921_mcu_set_bss_pm(dev, vif, false);
1033 	if (err)
1034 		return err;
1035 
1036 	err = mt7921_mcu_set_rxfilter(dev, 0,
1037 				      MT7921_FIF_BIT_CLR,
1038 				      MT_WF_RFCR_DROP_OTHER_BEACON);
1039 	if (err)
1040 		return err;
1041 
1042 	return 0;
1043 }
1044 
1045 int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr)
1046 {
1047 	struct mt7921_txpwr_event *event;
1048 	struct mt7921_txpwr_req req = {
1049 		.dbdc_idx = 0,
1050 	};
1051 	struct sk_buff *skb;
1052 	int ret;
1053 
1054 	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
1055 					&req, sizeof(req), true, &skb);
1056 	if (ret)
1057 		return ret;
1058 
1059 	event = (struct mt7921_txpwr_event *)skb->data;
1060 	WARN_ON(skb->len != le16_to_cpu(event->len));
1061 	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));
1062 
1063 	dev_kfree_skb(skb);
1064 
1065 	return 0;
1066 }
1067 
1068 int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
1069 			   bool enable)
1070 {
1071 	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
1072 	struct {
1073 		struct {
1074 			u8 band_idx;
1075 			u8 pad[3];
1076 		} __packed hdr;
1077 		struct sniffer_enable_tlv {
1078 			__le16 tag;
1079 			__le16 len;
1080 			u8 enable;
1081 			u8 pad[3];
1082 		} __packed enable;
1083 	} req = {
1084 		.hdr = {
1085 			.band_idx = mvif->band_idx,
1086 		},
1087 		.enable = {
1088 			.tag = cpu_to_le16(0),
1089 			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
1090 			.enable = enable,
1091 		},
1092 	};
1093 
1094 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
1095 				 true);
1096 }
1097 
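/*
 * Configure the sniffer channel for monitor mode: band, bandwidth and
 * control/center channels are derived from the channel context, with sco
 * carrying the secondary channel offset (SCA/SCB) for 40 MHz operation.
 */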
1098 int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
1099 			      struct ieee80211_chanctx_conf *ctx)
1100 {
1101 	struct cfg80211_chan_def *chandef = &ctx->def;
1102 	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
1103 	const u8 ch_band[] = {
1104 		[NL80211_BAND_2GHZ] = 1,
1105 		[NL80211_BAND_5GHZ] = 2,
1106 		[NL80211_BAND_6GHZ] = 3,
1107 	};
1108 	const u8 ch_width[] = {
1109 		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
1110 		[NL80211_CHAN_WIDTH_20] = 0,
1111 		[NL80211_CHAN_WIDTH_40] = 0,
1112 		[NL80211_CHAN_WIDTH_80] = 1,
1113 		[NL80211_CHAN_WIDTH_160] = 2,
1114 		[NL80211_CHAN_WIDTH_80P80] = 3,
1115 		[NL80211_CHAN_WIDTH_5] = 4,
1116 		[NL80211_CHAN_WIDTH_10] = 5,
1117 		[NL80211_CHAN_WIDTH_320] = 6,
1118 	};
1119 	struct {
1120 		struct {
1121 			u8 band_idx;
1122 			u8 pad[3];
1123 		} __packed hdr;
1124 		struct config_tlv {
1125 			__le16 tag;
1126 			__le16 len;
1127 			u16 aid;
1128 			u8 ch_band;
1129 			u8 bw;
1130 			u8 control_ch;
1131 			u8 sco;
1132 			u8 center_ch;
1133 			u8 center_ch2;
1134 			u8 drop_err;
1135 			u8 pad[3];
1136 		} __packed tlv;
1137 	} __packed req = {
1138 		.hdr = {
1139 			.band_idx = vif->mt76.band_idx,
1140 		},
1141 		.tlv = {
1142 			.tag = cpu_to_le16(1),
1143 			.len = cpu_to_le16(sizeof(req.tlv)),
1144 			.control_ch = chandef->chan->hw_value,
1145 			.center_ch = ieee80211_frequency_to_channel(freq1),
1146 			.drop_err = 1,
1147 		},
1148 	};
1149 	if (chandef->chan->band < ARRAY_SIZE(ch_band))
1150 		req.tlv.ch_band = ch_band[chandef->chan->band];
1151 	if (chandef->width < ARRAY_SIZE(ch_width))
1152 		req.tlv.bw = ch_width[chandef->width];
1153 
1154 	if (freq2)
1155 		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);
1156 
1157 	if (req.tlv.control_ch < req.tlv.center_ch)
1158 		req.tlv.sco = 1; /* SCA */
1159 	else if (req.tlv.control_ch > req.tlv.center_ch)
1160 		req.tlv.sco = 3; /* SCB */
1161 
1162 	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
1163 				 &req, sizeof(req), true);
1164 }
1165 
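/*
 * Upload the beacon template for firmware beacon offload: the frame is
 * prefixed with a TXD written by mt76_connac2_mac_write_txwi(), and the
 * TIM/CSA IE offsets are passed along so firmware can keep them updated.
 */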
1166 int
1167 mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
1168 				  struct ieee80211_hw *hw,
1169 				  struct ieee80211_vif *vif,
1170 				  bool enable)
1171 {
1172 	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
1173 	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
1174 	struct ieee80211_mutable_offsets offs;
1175 	struct {
1176 		struct req_hdr {
1177 			u8 bss_idx;
1178 			u8 pad[3];
1179 		} __packed hdr;
1180 		struct bcn_content_tlv {
1181 			__le16 tag;
1182 			__le16 len;
1183 			__le16 tim_ie_pos;
1184 			__le16 csa_ie_pos;
1185 			__le16 bcc_ie_pos;
1186 			/* 0: disable beacon offload
1187 			 * 1: enable beacon offload
1188 			 * 2: update probe response offload
1189 			 */
1190 			u8 enable;
1191 			/* 0: legacy format (TXD + payload)
1192 			 * 1: only cap field IE
1193 			 */
1194 			u8 type;
1195 			__le16 pkt_len;
1196 			u8 pkt[512];
1197 		} __packed beacon_tlv;
1198 	} req = {
1199 		.hdr = {
1200 			.bss_idx = mvif->mt76.idx,
1201 		},
1202 		.beacon_tlv = {
1203 			.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
1204 			.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
1205 			.enable = enable,
1206 		},
1207 	};
1208 	struct sk_buff *skb;
1209 
1210 	/* support the enable/update flow only;
1211 	 * the disable flow is handled automatically in the bss stop handler
1212 	 */
1213 	if (!enable)
1214 		return -EOPNOTSUPP;
1215 
1216 	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
1217 	if (!skb)
1218 		return -EINVAL;
1219 
1220 	if (skb->len > 512 - MT_TXD_SIZE) {
1221 		dev_err(dev->mt76.dev, "beacon size limit exceeded\n");
1222 		dev_kfree_skb(skb);
1223 		return -EINVAL;
1224 	}
1225 
1226 	mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
1227 				    skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
1228 	memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
1229 	req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
1230 	req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
1231 
1232 	if (offs.cntdwn_counter_offs[0]) {
1233 		u16 csa_offs;
1234 
1235 		csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
1236 		req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
1237 	}
1238 	dev_kfree_skb(skb);
1239 
1240 	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
1241 				 &req, sizeof(req), true);
1242 }
1243 
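/*
 * Send every CLC rule matching the requested alpha2 country code to
 * firmware.  When the chip advertises MT792x_CHIP_CAP_CLC_EVT_EN the
 * command waits for a reply and the returned TLV updates the allowed
 * channel configuration.  Returns -ENOENT when no rule matches so the
 * caller can fall back to the default country.
 */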
1244 static
1245 int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
1246 			 enum environment_cap env_cap,
1247 			 struct mt7921_clc *clc,
1248 			 u8 idx)
1249 {
1250 #define CLC_CAP_EVT_EN BIT(0)
1251 #define CLC_CAP_DTS_EN BIT(1)
1252 	struct sk_buff *skb, *ret_skb = NULL;
1253 	struct {
1254 		u8 ver;
1255 		u8 pad0;
1256 		__le16 len;
1257 		u8 idx;
1258 		u8 env;
1259 		u8 acpi_conf;
1260 		u8 cap;
1261 		u8 alpha2[2];
1262 		u8 type[2];
1263 		u8 env_6g;
1264 		u8 mtcl_conf;
1265 		u8 rsvd[62];
1266 	} __packed req = {
1267 		.ver = 1,
1268 		.idx = idx,
1269 		.env = env_cap,
1270 		.env_6g = dev->phy.power_type,
1271 		.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
1272 		.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
1273 	};
1274 	int ret, valid_cnt = 0;
1275 	u16 buf_len = 0;
1276 	u8 *pos;
1277 
1278 	if (!clc)
1279 		return 0;
1280 
1281 	if (dev->phy.chip_cap & MT792x_CHIP_CAP_CLC_EVT_EN)
1282 		req.cap |= CLC_CAP_EVT_EN;
1283 	if (mt76_find_power_limits_node(&dev->mt76))
1284 		req.cap |= CLC_CAP_DTS_EN;
1285 
1286 	buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
1287 	pos = clc->data;
1288 	while (buf_len > 16) {
1289 		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
1290 		u16 len = le16_to_cpu(rule->len);
1291 		u16 offset = len + sizeof(*rule);
1292 
1293 		pos += offset;
1294 		buf_len -= offset;
1295 		if (rule->alpha2[0] != alpha2[0] ||
1296 		    rule->alpha2[1] != alpha2[1])
1297 			continue;
1298 
1299 		memcpy(req.alpha2, rule->alpha2, 2);
1300 		memcpy(req.type, rule->type, 2);
1301 
1302 		req.len = cpu_to_le16(sizeof(req) + len);
1303 		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
1304 					   le16_to_cpu(req.len),
1305 					   sizeof(req), GFP_KERNEL);
1306 		if (!skb)
1307 			return -ENOMEM;
1308 		skb_put_data(skb, rule->data, len);
1309 
1310 		ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
1311 						    MCU_CE_CMD(SET_CLC),
1312 						    !!(req.cap & CLC_CAP_EVT_EN),
1313 						    &ret_skb);
1314 		if (ret < 0)
1315 			return ret;
1316 
1317 		if (ret_skb) {
1318 			struct mt7921_clc_info_tlv *info;
1319 
1320 			info = (struct mt7921_clc_info_tlv *)(ret_skb->data + 4);
1321 			dev->phy.clc_chan_conf = info->chan_conf;
1322 			dev_kfree_skb(ret_skb);
1323 		}
1324 
1325 		valid_cnt++;
1326 	}
1327 
1328 	if (!valid_cnt)
1329 		return -ENOENT;
1330 
1331 	return 0;
1332 }
1333 
1334 int mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
1335 		       enum environment_cap env_cap)
1336 {
1337 	struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy;
1338 	int i, ret;
1339 
1340 	/* submit all CLC configs */
1341 	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
1342 		ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
1343 					   phy->clc[i], i);
1344 
1345 		/* If no country found, set "00" as default */
1346 		if (ret == -ENOENT)
1347 			ret = __mt7921_mcu_set_clc(dev, "00",
1348 						   ENVIRON_INDOOR,
1349 						   phy->clc[i], i);
1350 		if (ret < 0)
1351 			return ret;
1352 	}
1353 	return 0;
1354 }
1355 
1356 int mt7921_mcu_get_temperature(struct mt792x_phy *phy)
1357 {
1358 	struct mt792x_dev *dev = phy->dev;
1359 	struct {
1360 		u8 ctrl_id;
1361 		u8 action;
1362 		u8 band_idx;
1363 		u8 rsv[5];
1364 	} req = {
1365 		.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
1366 		.band_idx = phy->mt76->band_idx,
1367 	};
1368 
1369 	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
1370 				 sizeof(req), true);
1371 }
1372 
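/*
 * Update the RX filter: a non-zero fif programs the full filter word,
 * otherwise bit_op/bit_map set or clear individual MT_WF_RFCR bits (see
 * MT7921_FIF_BIT_SET/CLR in mt7921_mcu_set_beacon_filter()).
 */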
1373 int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
1374 			    u8 bit_op, u32 bit_map)
1375 {
1376 	struct {
1377 		u8 rsv[4];
1378 		u8 mode;
1379 		u8 rsv2[3];
1380 		__le32 fif;
1381 		__le32 bit_map; /* bit_* for bitmap update */
1382 		u8 bit_op;
1383 		u8 pad[51];
1384 	} __packed data = {
1385 		.mode = fif ? 1 : 2,
1386 		.fif = cpu_to_le32(fif),
1387 		.bit_map = cpu_to_le32(bit_map),
1388 		.bit_op = bit_op,
1389 	};
1390 
1391 	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
1392 				 &data, sizeof(data), false);
1393 }
1394