xref: /linux/drivers/staging/greybus/sdio.c (revision 58f6259b7a08f8d47d4629609703d358b042f0fd)
// SPDX-License-Identifier: GPL-2.0
/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>

#include "gbphy.h"

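/*
 * Per-connection host state. Requests from the MMC core are queued on
 * mrq_workqueue and completed from gb_sdio_mrq_work(); "lock" protects
 * the current request and the removed/card_present flags, while "xfer"
 * only guards the stop-transmission flag.
 */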
struct gb_sdio_host {
	struct gb_connection	*connection;
	struct gbphy_device	*gbphy_dev;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct workqueue_struct	*mrq_workqueue;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};

#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/*
 * The kernel vdd values start at 0x80 and we need to translate them to
 * the greybus ones, which start at 0x01.
 */
#define GB_SDIO_VDD_SHIFT	8

#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif

static inline bool single_op(struct mmc_command *cmd)
{
	u32 opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}

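/* translate the Greybus capability bits into mmc_host caps/caps2 */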
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
		((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
		((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
		((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
		((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
		((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}

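/* translate the Greybus OCR voltage mask into the MMC_VDD_* bits */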
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}

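/*
 * Query the module capabilities and use them to initialize the mmc_host
 * limits (block size/count, OCR mask and frequency range).
 */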
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}

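/*
 * Events received while host->removed is set are queued here and replayed
 * once the host is registered; a new insert event clears a pending remove
 * event and vice versa.
 */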
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}

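/* handle card insert/remove and write-protect events from the module */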
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP)
		host->read_only = true;

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s\n",
			 host->card_present ? "inserted" : "removed");
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}

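/* handler for unsolicited SDIO event requests sent by the module */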
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}

static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	int ret;

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
				sizeof(*request), NULL, 0);

	gbphy_runtime_put_autosuspend(host->gbphy_dev);

	return ret;
}

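/*
 * Send one chunk of write data (at most host->data_max bytes) to the
 * module; "skip" is the offset already copied from the scatterlist.
 */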
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

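/*
 * Fetch one chunk of read data from the module and copy it back into the
 * request's scatterlist at offset "skip".
 */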
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->data_flags = data->flags >> 8;
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}

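/*
 * Split a data transfer into chunks that fit the Greybus payload limit,
 * bailing out early if a stop transmission has been requested.
 */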
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}

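/*
 * Translate an mmc_command into a Greybus command request, send it
 * synchronously and copy back the (short or long) response.
 */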
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	unsigned int timeout_ms;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know the data details at command time */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	timeout_ms = cmd->busy_timeout ? cmd->busy_timeout :
		GB_OPERATION_TIMEOUT_DEFAULT;

	ret = gb_operation_sync_timeout(host->connection, GB_SDIO_TYPE_COMMAND,
					&request, sizeof(request), &response,
					sizeof(response), timeout_ms);
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags == GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}

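/* process one queued mmc_request: sbc, cmd, data transfer and stop */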
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL\n");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}

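/*
 * mmc_host_ops .request callback: stash the request and hand it off to
 * the workqueue, since the Greybus operations used to complete it are
 * synchronous and block until the module responds.
 */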
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a request to cancel an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}

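/* translate the mmc_ios settings into a Greybus set_ios request */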
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL;

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}

static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}

static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

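/* no local work is needed for a signal voltage switch; just report success */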
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};

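/*
 * The host starts out flagged as removed so that events arriving before
 * mmc_add_host() completes are only queued; they are replayed once the
 * host is registered.
 */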
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we map the maximum request size 1:1 to the segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}

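/*
 * Mark the host as removed before tearing things down so that new
 * requests fail with -ESHUTDOWN and late events are only queued.
 */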
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}

static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");