xref: /linux/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c (revision 58f6259b7a08f8d47d4629609703d358b042f0fd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2023, Linaro Ltd. All rights reserved.
4  */
5 
6 #include <linux/err.h>
7 #include <linux/interrupt.h>
8 #include <linux/kernel.h>
9 #include <linux/mod_devicetable.h>
10 #include <linux/module.h>
11 #include <linux/of_device.h>
12 #include <linux/of_irq.h>
13 #include <linux/platform_device.h>
14 #include <linux/regmap.h>
15 #include <linux/regulator/consumer.h>
16 #include <linux/slab.h>
17 #include <linux/usb/pd.h>
18 #include <linux/usb/tcpm.h>
19 #include "qcom_pmic_typec_pdphy.h"
20 
/*
 * Per-interrupt bookkeeping: ties a Linux IRQ number to the driver's
 * internal virtual IRQ id and back to the owning PHY instance, so the
 * shared ISR can dispatch on irq_data->virq.
 */
struct pmic_typec_pdphy_irq_data {
	int				virq;	/* driver-internal id (PMIC_PDPHY_*_IRQ) */
	int				irq;	/* Linux IRQ from platform_get_irq_byname() */
	struct pmic_typec_pdphy		*pmic_typec_pdphy;	/* owning PHY instance */
};
26 
/*
 * Driver state for one QCOM PMIC USB-PD PHY block.
 */
struct pmic_typec_pdphy {
	struct device			*dev;
	struct tcpm_port		*tcpm_port;	/* set in qcom_pmic_typec_pdphy_start() */
	struct regmap			*regmap;
	u32				base;		/* register block base offset */

	unsigned int			nr_irqs;	/* entries in irq_data[] */
	struct pmic_typec_pdphy_irq_data	*irq_data;

	struct work_struct		reset_work;	/* hard-reset recovery, from SIG_RX IRQ */
	struct work_struct		receive_work;
	struct regulator		*vdd_pdphy;	/* PHY supply, held while enabled */
	spinlock_t			lock;		/* Register atomicity */
};
41 
42 static void qcom_pmic_typec_pdphy_reset_on(struct pmic_typec_pdphy *pmic_typec_pdphy)
43 {
44 	struct device *dev = pmic_typec_pdphy->dev;
45 	int ret;
46 
47 	/* Terminate TX */
48 	ret = regmap_write(pmic_typec_pdphy->regmap,
49 			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
50 	if (ret)
51 		goto err;
52 
53 	ret = regmap_write(pmic_typec_pdphy->regmap,
54 			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG, 0);
55 	if (ret)
56 		goto err;
57 
58 	return;
59 err:
60 	dev_err(dev, "pd_reset_on error\n");
61 }
62 
63 static void qcom_pmic_typec_pdphy_reset_off(struct pmic_typec_pdphy *pmic_typec_pdphy)
64 {
65 	struct device *dev = pmic_typec_pdphy->dev;
66 	int ret;
67 
68 	ret = regmap_write(pmic_typec_pdphy->regmap,
69 			   pmic_typec_pdphy->base + USB_PDPHY_FRAME_FILTER_REG,
70 			   FRAME_FILTER_EN_SOP | FRAME_FILTER_EN_HARD_RESET);
71 	if (ret)
72 		dev_err(dev, "pd_reset_off error\n");
73 }
74 
75 static void qcom_pmic_typec_pdphy_sig_reset_work(struct work_struct *work)
76 {
77 	struct pmic_typec_pdphy *pmic_typec_pdphy = container_of(work, struct pmic_typec_pdphy,
78 						     reset_work);
79 	unsigned long flags;
80 
81 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
82 
83 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
84 	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
85 
86 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
87 
88 	tcpm_pd_hard_reset(pmic_typec_pdphy->tcpm_port);
89 }
90 
91 static int
92 qcom_pmic_typec_pdphy_clear_tx_control_reg(struct pmic_typec_pdphy *pmic_typec_pdphy)
93 {
94 	struct device *dev = pmic_typec_pdphy->dev;
95 	unsigned int val;
96 	int ret;
97 
98 	/* Clear TX control register */
99 	ret = regmap_write(pmic_typec_pdphy->regmap,
100 			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, 0);
101 	if (ret)
102 		goto done;
103 
104 	/* Perform readback to ensure sufficient delay for command to latch */
105 	ret = regmap_read(pmic_typec_pdphy->regmap,
106 			  pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, &val);
107 
108 done:
109 	if (ret)
110 		dev_err(dev, "pd_clear_tx_control_reg: clear tx flag\n");
111 
112 	return ret;
113 }
114 
115 static int
116 qcom_pmic_typec_pdphy_pd_transmit_signal(struct pmic_typec_pdphy *pmic_typec_pdphy,
117 					 enum tcpm_transmit_type type,
118 					 unsigned int negotiated_rev)
119 {
120 	struct device *dev = pmic_typec_pdphy->dev;
121 	unsigned int val;
122 	unsigned long flags;
123 	int ret;
124 
125 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
126 
127 	/* Clear TX control register */
128 	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
129 	if (ret)
130 		goto done;
131 
132 	val = TX_CONTROL_SEND_SIGNAL;
133 	if (negotiated_rev == PD_REV30)
134 		val |= TX_CONTROL_RETRY_COUNT(2);
135 	else
136 		val |= TX_CONTROL_RETRY_COUNT(3);
137 
138 	if (type == TCPC_TX_CABLE_RESET || type == TCPC_TX_HARD_RESET)
139 		val |= TX_CONTROL_FRAME_TYPE(1);
140 
141 	ret = regmap_write(pmic_typec_pdphy->regmap,
142 			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);
143 
144 done:
145 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
146 
147 	dev_vdbg(dev, "pd_transmit_signal: type %d negotiate_rev %d send %d\n",
148 		 type, negotiated_rev, ret);
149 
150 	return ret;
151 }
152 
/*
 * Load a full PD message into the TX buffer and start transmission.
 *
 * Refuses to transmit (-EBUSY) while a received message is still
 * pending acknowledgement. The retry count is derived from the
 * revision carried in the message header; the negotiated_rev argument
 * is not consulted on this path (unlike the signal path) —
 * NOTE(review): presumably equivalent, confirm against the TCPM caller.
 */
static int
qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pdphy,
					  enum tcpm_transmit_type type,
					  const struct pd_message *msg,
					  unsigned int negotiated_rev)
{
	struct device *dev = pmic_typec_pdphy->dev;
	unsigned int val, hdr_len, txbuf_len, txsize_len;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	/* A non-zero RX_ACKNOWLEDGE means an RX message awaits processing */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG,
			  &val);
	if (ret)
		goto done;

	if (val) {
		dev_err(dev, "pd_transmit_payload: RX message pending\n");
		ret = -EBUSY;
		goto done;
	}

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Data objects are 4 bytes each; the count comes from the header */
	hdr_len = sizeof(msg->header);
	txbuf_len = pd_header_cnt_le(msg->header) * 4;
	txsize_len = hdr_len + txbuf_len - 1;

	/* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */
	ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
				pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG,
				&msg->header, hdr_len);
	if (ret)
		goto done;

	/* Write payload to USB_PDPHY_TX_BUFFER_DATA_REG for txbuf_len */
	if (txbuf_len) {
		ret = regmap_bulk_write(pmic_typec_pdphy->regmap,
					pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_DATA_REG,
					&msg->payload, txbuf_len);
		if (ret)
			goto done;
	}

	/* Write total length ((header + data) - 1) to USB_PDPHY_TX_SIZE_REG */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_SIZE_REG,
			   txsize_len);
	if (ret)
		goto done;

	/* Clear TX control register */
	ret = qcom_pmic_typec_pdphy_clear_tx_control_reg(pmic_typec_pdphy);
	if (ret)
		goto done;

	/* Initiate transmit with retry count as indicated by PD revision */
	val = TX_CONTROL_FRAME_TYPE(type) | TX_CONTROL_SEND_MSG;
	if (pd_header_rev(msg->header) == PD_REV30)
		val |= TX_CONTROL_RETRY_COUNT(2);
	else
		val |= TX_CONTROL_RETRY_COUNT(3);

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_TX_CONTROL_REG, val);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	if (ret) {
		dev_err(dev, "pd_transmit_payload: hdr %*ph data %*ph ret %d\n",
			hdr_len, &msg->header, txbuf_len, &msg->payload, ret);
	}

	return ret;
}
235 
236 int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
237 				      enum tcpm_transmit_type type,
238 				      const struct pd_message *msg,
239 				      unsigned int negotiated_rev)
240 {
241 	struct device *dev = pmic_typec_pdphy->dev;
242 	int ret;
243 
244 	if (msg) {
245 		ret = qcom_pmic_typec_pdphy_pd_transmit_payload(pmic_typec_pdphy,
246 								type, msg,
247 								negotiated_rev);
248 	} else {
249 		ret = qcom_pmic_typec_pdphy_pd_transmit_signal(pmic_typec_pdphy,
250 							       type,
251 							       negotiated_rev);
252 	}
253 
254 	if (ret)
255 		dev_dbg(dev, "pd_transmit: type %x result %d\n", type, ret);
256 
257 	return ret;
258 }
259 
/*
 * Drain one received PD message from the RX buffer and hand it to the
 * TCPM core. Called from the MSG_RX interrupt path.
 */
static void qcom_pmic_typec_pdphy_pd_receive(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	struct pd_message msg;
	unsigned int size, rx_status;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);

	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_SIZE_REG, &size);
	if (ret)
		goto done;

	/* Hardware requires +1 of the real read value to be passed */
	if (size < 1 || size > sizeof(msg.payload) + 1) {
		dev_dbg(dev, "pd_receive: invalid size %d\n", size);
		goto done;
	}

	/* size + 1 bytes == header plus payload, at most sizeof(msg) */
	size += 1;
	/*
	 * rx_status is read but never used — NOTE(review): possibly a
	 * required readback or leftover; confirm against the PMIC docs.
	 */
	ret = regmap_read(pmic_typec_pdphy->regmap,
			  pmic_typec_pdphy->base + USB_PDPHY_RX_STATUS_REG,
			  &rx_status);

	if (ret)
		goto done;

	ret = regmap_bulk_read(pmic_typec_pdphy->regmap,
			       pmic_typec_pdphy->base + USB_PDPHY_RX_BUFFER_REG,
			       (u8 *)&msg, size);
	if (ret)
		goto done;

	/* Return ownership of RX buffer to hardware */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, 0);

done:
	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);

	/* Hand off outside the spinlock, and only if every step succeeded */
	if (!ret) {
		dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
		tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
	}
}
307 
308 static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
309 {
310 	struct pmic_typec_pdphy_irq_data *irq_data = dev_id;
311 	struct pmic_typec_pdphy *pmic_typec_pdphy = irq_data->pmic_typec_pdphy;
312 	struct device *dev = pmic_typec_pdphy->dev;
313 
314 	switch (irq_data->virq) {
315 	case PMIC_PDPHY_SIG_TX_IRQ:
316 		dev_err(dev, "isr: tx_sig\n");
317 		break;
318 	case PMIC_PDPHY_SIG_RX_IRQ:
319 		schedule_work(&pmic_typec_pdphy->reset_work);
320 		break;
321 	case PMIC_PDPHY_MSG_TX_IRQ:
322 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
323 					  TCPC_TX_SUCCESS);
324 		break;
325 	case PMIC_PDPHY_MSG_RX_IRQ:
326 		qcom_pmic_typec_pdphy_pd_receive(pmic_typec_pdphy);
327 		break;
328 	case PMIC_PDPHY_MSG_TX_FAIL_IRQ:
329 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
330 					  TCPC_TX_FAILED);
331 		break;
332 	case PMIC_PDPHY_MSG_TX_DISCARD_IRQ:
333 		tcpm_pd_transmit_complete(pmic_typec_pdphy->tcpm_port,
334 					  TCPC_TX_DISCARDED);
335 		break;
336 	}
337 
338 	return IRQ_HANDLED;
339 }
340 
341 int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
342 {
343 	unsigned long flags;
344 	int ret;
345 
346 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
347 
348 	ret = regmap_write(pmic_typec_pdphy->regmap,
349 			   pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, !on);
350 
351 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
352 
353 	dev_dbg(pmic_typec_pdphy->dev, "set_pd_rx: %s\n", on ? "on" : "off");
354 
355 	return ret;
356 }
357 
358 int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
359 				    bool data_role_host, bool power_role_src)
360 {
361 	struct device *dev = pmic_typec_pdphy->dev;
362 	unsigned long flags;
363 	int ret;
364 
365 	spin_lock_irqsave(&pmic_typec_pdphy->lock, flags);
366 
367 	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
368 				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
369 				 MSG_CONFIG_PORT_DATA_ROLE |
370 				 MSG_CONFIG_PORT_POWER_ROLE,
371 				 data_role_host << 3 | power_role_src << 2);
372 
373 	spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
374 
375 	dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
376 		data_role_host, power_role_src);
377 
378 	return ret;
379 }
380 
/*
 * Power up and initialise the PD PHY: enable the supply, select PD 2.0
 * defaults, toggle the block enable, then deassert reset. On any
 * failure the supply taken here is released again.
 */
static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdphy)
{
	struct device *dev = pmic_typec_pdphy->dev;
	int ret;

	ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
	if (ret)
		return ret;

	/* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
	ret = regmap_update_bits(pmic_typec_pdphy->regmap,
				 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
				 MSG_CONFIG_SPEC_REV_MASK, PD_REV20);
	if (ret)
		goto done;

	/* Toggle the block enable: clear first, then set CONTROL_ENABLE */
	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
	if (ret)
		goto done;

	ret = regmap_write(pmic_typec_pdphy->regmap,
			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG,
			   CONTROL_ENABLE);
	if (ret)
		goto done;

	qcom_pmic_typec_pdphy_reset_off(pmic_typec_pdphy);
done:
	if (ret) {
		/* Undo the regulator_enable() from above */
		regulator_disable(pmic_typec_pdphy->vdd_pdphy);
		dev_err(dev, "pdphy_enable fail %d\n", ret);
	}

	return ret;
}
417 
418 static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdphy)
419 {
420 	int ret;
421 
422 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
423 
424 	ret = regmap_write(pmic_typec_pdphy->regmap,
425 			   pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
426 
427 	regulator_disable(pmic_typec_pdphy->vdd_pdphy);
428 
429 	return ret;
430 }
431 
/*
 * Full PHY power cycle: disable, wait for the block to settle, then
 * re-enable with default configuration.
 */
static int pmic_typec_pdphy_reset(struct pmic_typec_pdphy *pdphy)
{
	int ret;

	ret = qcom_pmic_typec_pdphy_disable(pdphy);
	if (ret)
		return ret;

	usleep_range(400, 500);

	return qcom_pmic_typec_pdphy_enable(pdphy);
}
445 
446 int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
447 				struct tcpm_port *tcpm_port)
448 {
449 	int i;
450 	int ret;
451 
452 	pmic_typec_pdphy->tcpm_port = tcpm_port;
453 
454 	ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
455 	if (ret)
456 		return ret;
457 
458 	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
459 		enable_irq(pmic_typec_pdphy->irq_data[i].irq);
460 
461 	return 0;
462 }
463 
464 void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
465 {
466 	int i;
467 
468 	for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
469 		disable_irq(pmic_typec_pdphy->irq_data[i].irq);
470 
471 	qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
472 }
473 
474 struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
475 {
476 	return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
477 }
478 
479 int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
480 				struct pmic_typec_pdphy *pmic_typec_pdphy,
481 				struct pmic_typec_pdphy_resources *res,
482 				struct regmap *regmap,
483 				u32 base)
484 {
485 	struct device *dev = &pdev->dev;
486 	struct pmic_typec_pdphy_irq_data *irq_data;
487 	int i, ret, irq;
488 
489 	if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
490 		return -EINVAL;
491 
492 	irq_data = devm_kzalloc(dev, sizeof(*irq_data) * res->nr_irqs,
493 				GFP_KERNEL);
494 	if (!irq_data)
495 		return -ENOMEM;
496 
497 	pmic_typec_pdphy->vdd_pdphy = devm_regulator_get(dev, "vdd-pdphy");
498 	if (IS_ERR(pmic_typec_pdphy->vdd_pdphy))
499 		return PTR_ERR(pmic_typec_pdphy->vdd_pdphy);
500 
501 	pmic_typec_pdphy->dev = dev;
502 	pmic_typec_pdphy->base = base;
503 	pmic_typec_pdphy->regmap = regmap;
504 	pmic_typec_pdphy->nr_irqs = res->nr_irqs;
505 	pmic_typec_pdphy->irq_data = irq_data;
506 	spin_lock_init(&pmic_typec_pdphy->lock);
507 	INIT_WORK(&pmic_typec_pdphy->reset_work, qcom_pmic_typec_pdphy_sig_reset_work);
508 
509 	for (i = 0; i < res->nr_irqs; i++, irq_data++) {
510 		irq = platform_get_irq_byname(pdev, res->irq_params[i].irq_name);
511 		if (irq < 0)
512 			return irq;
513 
514 		irq_data->pmic_typec_pdphy = pmic_typec_pdphy;
515 		irq_data->irq = irq;
516 		irq_data->virq = res->irq_params[i].virq;
517 
518 		ret = devm_request_threaded_irq(dev, irq, NULL,
519 						qcom_pmic_typec_pdphy_isr,
520 						IRQF_ONESHOT | IRQF_NO_AUTOEN,
521 						res->irq_params[i].irq_name,
522 						irq_data);
523 		if (ret)
524 			return ret;
525 	}
526 
527 	return 0;
528 }
529