xref: /illumos-gate/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_scsi.c (revision 5f8171005a0c33f3c67f7da52d41c2362c3fd891)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 #include <sys/dkio.h>
39 
40 #include <stmf.h>
41 #include <lpif.h>
42 #include <portif.h>
43 #include <stmf_ioctl.h>
44 #include <stmf_sbd.h>
45 #include <stmf_sbd_ioctl.h>
46 #include <sbd_impl.h>
47 
/*
 * Evaluates non-zero when the command in 'cdb' may execute even though
 * another initiator holds a SCSI-2 reservation on the LU, i.e. it must
 * not be failed with RESERVATION CONFLICT.  The referenced SPC/SBC
 * tables enumerate the conflict-free commands.
 */
#define	SCSI2_CONFLICT_FREE_CMDS(cdb)	( \
	/* ----------------------- */                                      \
	/* Refer Both		   */                                      \
	/* SPC-2 (rev 20) Table 10 */                                      \
	/* SPC-3 (rev 23) Table 31 */                                      \
	/* ----------------------- */                                      \
	((cdb[0]) == SCMD_INQUIRY)					|| \
	((cdb[0]) == SCMD_LOG_SENSE_G1)					|| \
	((cdb[0]) == SCMD_RELEASE)					|| \
	((cdb[0]) == SCMD_RELEASE_G1)					|| \
	((cdb[0]) == SCMD_REPORT_LUNS)					|| \
	((cdb[0]) == SCMD_REQUEST_SENSE)				|| \
	/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */               \
	((((cdb[0]) == SCMD_DOORLOCK) && (((cdb[4]) & 0x3) == 0)))	|| \
	/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */       \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && (                          \
	    ((cdb[1]) & 0x1F) == 0x01))					|| \
	/* MAINTENANCE IN with service actions REPORT ALIASES (0x0B) */    \
	/* REPORT DEVICE IDENTIFIER (0x05)  REPORT PRIORITY (0x0E) */      \
	/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */     \
	(((cdb[0]) == SCMD_MAINTENANCE_IN) && (                            \
	    (((cdb[1]) & 0x1F) == 0x0B) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x05) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0E) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0A) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0F)))				|| \
	/* ----------------------- */                                      \
	/* SBC-3 (rev 17) Table 3  */                                      \
	/* ----------------------- */                                      \
	/* READ CAPACITY(10) */                                            \
	((cdb[0]) == SCMD_READ_CAPACITY)				|| \
	/* READ CAPACITY(16) */                                            \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && (                          \
	    ((cdb[1]) & 0x1F) == 0x10))					|| \
	/* START STOP UNIT with START bit 0 and POWER CONDITION 0  */      \
	(((cdb[0]) == SCMD_START_STOP) && (                                \
	    (((cdb[4]) & 0xF0) == 0) && (((cdb[4]) & 0x01) == 0))))
/* End of SCSI2_CONFLICT_FREE_CMDS */
86 
87 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
88 static void sbd_handle_sync_cache(struct scsi_task *task,
89     struct stmf_data_buf *initial_dbuf);
90 void sbd_handle_read_xfer_completion(struct scsi_task *task,
91     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
92 void sbd_handle_short_write_xfer_completion(scsi_task_t *task,
93     stmf_data_buf_t *dbuf);
94 void sbd_handle_short_write_transfers(scsi_task_t *task,
95     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size);
96 static void sbd_handle_sync_cache(struct scsi_task *task,
97     struct stmf_data_buf *initial_dbuf);
98 void sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf,
99     uint32_t buflen);
100 void sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf);
101 
102 extern void sbd_pgr_initialize_it(scsi_task_t *);
103 extern int sbd_pgr_reservation_conflict(scsi_task_t *);
104 extern void sbd_pgr_reset(sbd_lu_t *);
105 extern void sbd_pgr_remove_it_handle(sbd_lu_t *, sbd_it_data_t *);
106 extern void sbd_handle_pgr_in_cmd(scsi_task_t *, stmf_data_buf_t *);
107 extern void sbd_handle_pgr_out_cmd(scsi_task_t *, stmf_data_buf_t *);
108 extern void sbd_handle_pgr_out_data(scsi_task_t *, stmf_data_buf_t *);
109 /*
110  * IMPORTANT NOTE:
111  * =================
112  * The whole world here is based on the assumption that everything within
113  * a scsi task executes in a single threaded manner, even the aborts.
114  * Dont ever change that. There wont be any performance gain but there
115  * will be tons of race conditions.
116  */
117 
/*
 * Fill 'dbuf' with up to scmd->len bytes of READ data starting at the
 * command's current relative offset and queue it for transfer to the
 * remote port.  If more data remains and the port can accept more
 * buffers, allocate another dbuf and recurse so that several transfers
 * are in flight at once.  Completion is handled (single-threaded, see
 * the note above) by sbd_handle_read_xfer_completion().
 */
void
sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;		/* byte offset into the LU backing store */
	uint32_t len, buflen, iolen;
	int ndx;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
	laddr = scmd->addr + scmd->current_ro;

	/* Read into each scatter/gather segment until 'len' is satisfied. */
	for (buflen = 0, ndx = 0; (buflen < len) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_read(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			/* Do not need to do xfer anymore, just complete it */
			dbuf->db_data_size = 0;
			dbuf->db_xfer_status = STMF_SUCCESS;
			sbd_handle_read_xfer_completion(task, scmd, dbuf);
			return;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = buflen;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	scmd->len -= buflen;
	scmd->current_ro += buflen;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/*
			 * A bad port implementation can keep on failing the
			 * request but keep on sending us a false minsize.
			 * Only retry while minsize keeps shrinking.
			 */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* The buffer(s) already in flight finish the job. */
			return;
		}
		scmd->nbufs++;
		sbd_do_read_xfer(task, scmd, dbuf);
	}
}
181 
/*
 * Called when a data buffer queued by sbd_do_read_xfer() has been sent
 * to the remote port.  Sends final SCSI status once all of the command's
 * outstanding buffers have completed; otherwise keeps the pipeline going
 * by reusing the buffer (or reallocating it if the port marked it
 * DB_DONT_REUSE) for the next chunk.
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	/* Done (or failed): drain the remaining buffers, then send status. */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if no other buffer remains in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
228 
/*
 * Handle READ(6)/(10)/(12)/(16).  Decodes the LBA and transfer length
 * from the CDB, validates the range against the LU size, and either
 * services the whole request in one shot (fast path: single buffer with
 * one sg segment and an exact expected transfer length) or sets up an
 * sbd_cmd_t and streams the data through sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	if (op == SCMD_READ) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* READ(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never transfer more than the initiator expects. */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/*
	 * Fast path: the whole read fits in one contiguous segment, so
	 * issue a single backing-store read and let the port piggyback
	 * GOOD status on the data transfer.
	 */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sbd_data_read(sl, laddr, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: track progress in an sbd_cmd_t across many xfers. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
338 
/*
 * Request up to scmd->len bytes of WRITE data from the remote port into
 * 'dbuf'.  If more data remains and the port can accept more buffers,
 * allocate another dbuf and recurse so several transfers are outstanding
 * at once.  Arriving data is consumed by
 * sbd_handle_write_xfer_completion().
 */
void
sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	uint32_t len;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;

	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = len;
	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	scmd->len -= len;
	scmd->current_ro += len;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* The buffer(s) already queued drive the rest. */
			return;
		}
		scmd->nbufs++;
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
376 
/*
 * Called when a data-out buffer queued by sbd_do_write_xfer() (or an
 * immediate-data buffer) has arrived from the remote port.  Writes the
 * buffer contents to the backing store, sends final status once the
 * whole command has been received, and otherwise requeues the buffer
 * for the next chunk.  'dbuf_reusable' is zero when the caller's buffer
 * (e.g. initial burst data) must not be reused for further transfers.
 */
void
sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t buflen, iolen;
	int ndx;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* A previous buffer already failed; just drain this one. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto WRITE_XFER_DONE;
	}

	laddr = scmd->addr + dbuf->db_relative_offset;

	/* Write out each scatter/gather segment of the received data. */
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_write(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			break;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;
WRITE_XFER_DONE:
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE || dbuf_reusable == 0) {
		uint32_t maxsize, minsize, old_minsize;
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if no other buffer remains in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_write_xfer(task, scmd, dbuf);
}
450 
/*
 * Handle WRITE(6)/(10)/(12)/(16).  Decodes the LBA and transfer length,
 * enforces write protection and LU bounds, then collects the data from
 * the port — consuming any initial burst data first — via
 * sbd_do_write_xfer() / sbd_handle_write_xfer_completion().
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_PROTECTED);
		return;
	}
	if (op == SCMD_WRITE) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		/* WRITE(6): a transfer length of 0 means 256 blocks. */
		if (len == 0) {
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to bytes. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never ask for more data than the initiator is going to send. */
	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
	} else if (task->task_flags & TF_INITIAL_BURST) {
		/* The port delivered some (or all) data with the task. */
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/*
		 * Account for the already-received data and process it as
		 * a just-completed transfer (the buffer is not reusable).
		 */
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
563 
564 /*
565  * Utility routine to handle small non performance data transfers to the
566  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
567  * buffer which is source of data for transfer, cdb_xfer_size is the
568  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
569  * which this command would transfer (the size of data pointed to by 'p').
570  */
/*
 * See the block comment above: transfer the small payload at 'p' to the
 * initiator, clamped to both the CDB allocation length and the
 * transport's expected transfer length.
 */
void
sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
    uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
{
	uint32_t bufsize, ndx;
	sbd_cmd_t *scmd;

	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);

	task->task_cmd_xfer_length = cmd_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cmd_xfer_size;
	} else {
		cmd_xfer_size = min(cmd_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cmd_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	if (dbuf == NULL) {
		uint32_t minsize = cmd_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
	}
	if (dbuf == NULL) {
		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
		return;
	}

	/* Scatter the source buffer across the dbuf's sg segments. */
	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
		uint8_t *d;
		uint32_t s;

		/*
		 * NOTE(review): ndx is not bounded by db_sglist_length;
		 * presumably a dbuf of cmd_xfer_size bytes always carries
		 * enough segments to cover it — confirm with the port
		 * provider contract.
		 */
		d = dbuf->db_sglist[ndx].seg_addr;
		s = min((cmd_xfer_size - bufsize),
		    dbuf->db_sglist[ndx].seg_length);
		bcopy(p+bufsize, d, s);
		bufsize += s;
	}
	dbuf->db_relative_offset = 0;
	dbuf->db_data_size = cmd_xfer_size;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;

	if (task->task_lu_private == NULL) {
		task->task_lu_private =
		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;

	scmd->cmd_type = SBD_CMD_SMALL_READ;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	(void) stmf_xfer_data(task, dbuf, 0);
}
627 
628 void
629 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
630 				struct stmf_data_buf *dbuf)
631 {
632 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
633 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
634 		    dbuf->db_xfer_status, NULL);
635 		return;
636 	}
637 	task->task_nbytes_transferred = dbuf->db_data_size;
638 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
639 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
640 }
641 
/*
 * Start a small data-out transfer (e.g. a MODE SELECT or PERSISTENT
 * RESERVE OUT parameter list).  Clamps the size to what the initiator
 * will actually send, records the command state, and either queues the
 * transfer or — if the data already arrived with the task — processes
 * it immediately via sbd_handle_short_write_xfer_completion().
 */
void
sbd_handle_short_write_transfers(scsi_task_t *task,
    stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size)
{
	sbd_cmd_t *scmd;

	task->task_cmd_xfer_length = cdb_xfer_size;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = cdb_xfer_size;
	} else {
		cdb_xfer_size = min(cdb_xfer_size,
		    task->task_expected_xfer_length);
	}

	if (cdb_xfer_size == 0) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}
	/* Start from a clean command state. */
	if (task->task_lu_private == NULL) {
		task->task_lu_private = kmem_zalloc(sizeof (sbd_cmd_t),
		    KM_SLEEP);
	} else {
		bzero(task->task_lu_private, sizeof (sbd_cmd_t));
	}
	scmd = (sbd_cmd_t *)task->task_lu_private;
	scmd->cmd_type = SBD_CMD_SMALL_WRITE;
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->len = cdb_xfer_size;
	if (dbuf == NULL) {
		uint32_t minsize = cdb_xfer_size;

		dbuf = stmf_alloc_dbuf(task, cdb_xfer_size, &minsize, 0);
		if (dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_data_size = cdb_xfer_size;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
	} else {
		/* Data came in with the task (initial burst). */
		if (dbuf->db_data_size < cdb_xfer_size) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ABORTED, NULL);
			return;
		}
		dbuf->db_data_size = cdb_xfer_size;
		sbd_handle_short_write_xfer_completion(task, dbuf);
	}
}
694 
/*
 * Completion handler for a SBD_CMD_SMALL_WRITE transfer.  Dispatches the
 * received parameter data to the opcode-specific handler, or proxies the
 * command to the active node when this LU is in standby mode.
 */
void
sbd_handle_short_write_xfer_completion(scsi_task_t *task,
    stmf_data_buf_t *dbuf)
{
	sbd_cmd_t *scmd;
	stmf_status_t st_ret;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;

	/*
	 * For now lets assume we will get only one sglist element
	 * for short writes. If that ever changes, we should allocate
	 * a local buffer and copy all the sg elements to one linear space.
	 */
	if ((dbuf->db_xfer_status != STMF_SUCCESS) ||
	    (dbuf->db_sglist_length > 1)) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	task->task_nbytes_transferred = dbuf->db_data_size;
	scmd = (sbd_cmd_t *)task->task_lu_private;
	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;

	/* Lets find out who to call */
	switch (task->task_cdb[0]) {
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_mode_select_xfer(task,
			    dbuf->db_sglist[0].seg_addr, dbuf->db_data_size);
		}
		break;
	case SCMD_PERSISTENT_RESERVE_OUT:
		if (sl->sl_access_state == SBD_LU_STANDBY) {
			st_ret = stmf_proxy_scsi_cmd(task, dbuf);
			if (st_ret != STMF_SUCCESS) {
				stmf_scsilib_send_status(task, STATUS_CHECK,
				    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
			}
		} else {
			sbd_handle_pgr_out_data(task, dbuf);
		}
		break;
	default:
		/* This should never happen */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ABORTED, NULL);
	}
}
751 
/*
 * Handle READ CAPACITY(10) and READ CAPACITY(16) (SERVICE ACTION IN).
 * The returned value is the LBA of the *last* block — hence the s-- —
 * together with the logical block size.
 */
void
sbd_handle_read_capacity(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cdb_len;
	uint8_t p[32];
	uint64_t s;
	uint16_t blksize;

	s = sl->sl_lu_size >> sl->sl_data_blocksize_shift;
	s--;
	blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;

	switch (task->task_cdb[0]) {
	case SCMD_READ_CAPACITY:
		/* Capacities beyond 32 bits report 0xFFFFFFFF. */
		if (s & 0xffffffff00000000ull) {
			p[0] = p[1] = p[2] = p[3] = 0xFF;
		} else {
			p[0] = (s >> 24) & 0xff;
			p[1] = (s >> 16) & 0xff;
			p[2] = (s >> 8) & 0xff;
			p[3] = s & 0xff;
		}
		p[4] = 0; p[5] = 0;
		p[6] = (blksize >> 8) & 0xff;
		p[7] = blksize & 0xff;
		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
		break;

	case SCMD_SVC_ACTION_IN_G4:
		/* READ CAPACITY(16): allocation length is in the CDB. */
		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
		bzero(p, 32);
		p[0] = (s >> 56) & 0xff;
		p[1] = (s >> 48) & 0xff;
		p[2] = (s >> 40) & 0xff;
		p[3] = (s >> 32) & 0xff;
		p[4] = (s >> 24) & 0xff;
		p[5] = (s >> 16) & 0xff;
		p[6] = (s >> 8) & 0xff;
		p[7] = s & 0xff;
		p[10] = (blksize >> 8) & 0xff;
		p[11] = blksize & 0xff;
		sbd_handle_short_read_transfers(task, initial_dbuf, p,
		    cdb_len, 32);
		break;
	}
}
800 
/*
 * Derive a synthetic disk geometry for a LU of 's' bytes with logical
 * block size 'blksize'.  LUs smaller than 4GB get a 32 sector / 8 head
 * layout, larger ones 254/254; the cylinder count is whatever remains:
 * s / (blksize * nsectors * nheads).
 */
void
sbd_calc_geometry(uint64_t s, uint16_t blksize, uint8_t *nsectors,
    uint8_t *nheads, uint32_t *ncyl)
{
	const uint64_t four_gb = 4ull * 1024ull * 1024ull * 1024ull;
	uint8_t spt, heads;

	if (s < four_gb) {
		spt = 32;
		heads = 8;
	} else {
		spt = 254;
		heads = 254;
	}

	*nsectors = spt;
	*nheads = heads;
	*ncyl = s / ((uint64_t)blksize * (uint64_t)spt * (uint64_t)heads);
}
815 
/*
 * Handle MODE SENSE(6)/(10).  Builds the mode parameter header plus the
 * requested page(s) into 'buf' (assumed zeroed out and large enough):
 * the format (0x03) and rigid disk geometry (0x04) pages, the caching
 * page (wce reflecting the LU's write-cache flags) and the control mode
 * page.  No block descriptors are returned.  'ctrl' selects current
 * (0), changeable (1) or default/saved values; unsupported page codes
 * are rejected with INVALID FIELD IN CDB.
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf, uint8_t *buf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cmd_size, n;
	uint8_t *cdb;
	uint32_t ncyl;
	uint8_t nsectors, nheads;
	uint8_t page, ctrl, header_size, pc_valid;
	uint16_t nbytes;
	uint8_t *p;
	uint64_t s = sl->sl_lu_size;
	uint32_t dev_spec_param_offset;

	p = buf;	/* buf is assumed to be zeroed out and large enough */
	n = 0;
	cdb = &task->task_cdb[0];
	page = cdb[2] & 0x3F;		/* page code */
	ctrl = (cdb[2] >> 6) & 3;	/* page control (PC) field */
	cmd_size = (cdb[0] == SCMD_MODE_SENSE) ? cdb[4] :
	    READ_SCSI16(&cdb[7], uint32_t);

	/* MODE SENSE(6) and (10) have different header layouts. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		header_size = 4;
		dev_spec_param_offset = 2;
	} else {
		header_size = 8;
		dev_spec_param_offset = 3;
	}

	/* Now validate the command */
	if ((cdb[2] == 0) || (page == MODEPAGE_ALLPAGES) || (page == 0x08) ||
	    (page == 0x0A) || (page == 0x03) || (page == 0x04)) {
		pc_valid = 1;
	} else {
		pc_valid = 0;
	}
	if ((cmd_size < header_size) || (pc_valid == 0)) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/* We will update the length in the mode header at the end */

	/* Block dev device specific param in mode param header has wp bit */
	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		p[n + dev_spec_param_offset] = BIT_7;
	}
	n += header_size;
	/* We are not going to return any block descriptor */

	nbytes = ((uint16_t)1) << sl->sl_data_blocksize_shift;
	sbd_calc_geometry(s, nbytes, &nsectors, &nheads, &ncyl);

	/* Format device page (0x03). */
	if ((page == 0x03) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x03;
		p[n+1] = 0x16;
		if (ctrl != 1) {
			p[n + 11] = nsectors;
			p[n + 12] = nbytes >> 8;
			p[n + 13] = nbytes & 0xff;
			p[n + 20] = 0x80;
		}
		n += 24;
	}
	/* Rigid disk geometry page (0x04). */
	if ((page == 0x04) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x04;
		p[n + 1] = 0x16;
		if (ctrl != 1) {
			p[n + 2] = ncyl >> 16;
			p[n + 3] = ncyl >> 8;
			p[n + 4] = ncyl & 0xff;
			p[n + 5] = nheads;
			p[n + 20] = 0x15;
			p[n + 21] = 0x18;
		}
		n += 24;
	}
	if ((page == MODEPAGE_CACHING) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_caching *mode_caching_page;

		mode_caching_page = (struct mode_caching *)&p[n];

		mode_caching_page->mode_page.code = MODEPAGE_CACHING;
		mode_caching_page->mode_page.ps = 1; /* A saveable page */
		mode_caching_page->mode_page.length = 0x12;

		/* wce is derived from a different flag per PC value. */
		switch (ctrl) {
		case (0):
			/* Current */
			if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		case (1):
			/* Changeable */
			if ((sl->sl_flags &
			    SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		default:
			/* Default / saved values */
			if ((sl->sl_flags &
			    SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;
		}
		n += (sizeof (struct mode_page) +
		    mode_caching_page->mode_page.length);
	}
	if ((page == MODEPAGE_CTRL_MODE) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_control_scsi3 *mode_control_page;

		mode_control_page = (struct mode_control_scsi3 *)&p[n];

		mode_control_page->mode_page.code = MODEPAGE_CTRL_MODE;
		mode_control_page->mode_page.length =
		    PAGELENGTH_MODE_CONTROL_SCSI3;
		if (ctrl != 1) {
			/* If not looking for changeable values, report this. */
			mode_control_page->que_mod = CTRL_QMOD_UNRESTRICT;
		}
		n += (sizeof (struct mode_page) +
		    mode_control_page->mode_page.length);
	}

	if (cdb[0] == SCMD_MODE_SENSE) {
		/* MODE SENSE(6) can only describe up to 255 bytes. */
		if (n > 255) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/*
		 * Mode parameter header length doesn't include the number
		 * of bytes in the length field, so adjust the count.
		 * Byte count minus header length field size.
		 */
		buf[0] = (n - 1) & 0xff;
	} else {
		/* Byte count minus header length field size. */
		buf[1] = (n - 2) & 0xff;
		buf[0] = ((n - 2) >> 8) & 0xff;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, buf,
	    cmd_size, n);
}
968 
969 void
970 sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf)
971 {
972 	uint32_t cmd_xfer_len;
973 
974 	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
975 		cmd_xfer_len = (uint32_t)task->task_cdb[4];
976 	} else {
977 		cmd_xfer_len = READ_SCSI16(&task->task_cdb[7], uint32_t);
978 	}
979 
980 	if ((task->task_cdb[1] & 0xFE) != 0x10) {
981 		stmf_scsilib_send_status(task, STATUS_CHECK,
982 		    STMF_SAA_INVALID_FIELD_IN_CDB);
983 		return;
984 	}
985 
986 	if (cmd_xfer_len == 0) {
987 		/* zero byte mode selects are allowed */
988 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
989 		return;
990 	}
991 
992 	sbd_handle_short_write_transfers(task, dbuf, cmd_xfer_len);
993 }
994 
/*
 * Process the MODE SELECT parameter list once the data-out phase has
 * completed.  The only mode page this LU accepts is the caching page
 * (page code 8), and within it only the WCE bit may be set.  On a
 * successful change the new write-cache setting is applied to the
 * backing store, mirrored into sl_flags, and every other I_T nexus gets
 * a MODE PARAMETERS CHANGED unit attention; if the SP bit of the CDB
 * was set, the saved setting is also persisted via sbd_write_lu_info().
 */
void
sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf, uint32_t buflen)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	int hdr_len, bd_len;
	sbd_status_t sret;
	int i;

	/* 6-byte MODE SELECT has a 4-byte header, 10-byte an 8-byte one */
	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
		hdr_len = 4;
	} else {
		hdr_len = 8;
	}

	if (buflen < hdr_len)
		goto mode_sel_param_len_err;

	/* Block descriptor length from the mode parameter header */
	bd_len = hdr_len == 4 ? buf[3] : READ_SCSI16(&buf[6], int);

	/* Need at least a 2-byte mode page header past the descriptors */
	if (buflen < (hdr_len + bd_len + 2))
		goto mode_sel_param_len_err;

	/* Advance to the start of the mode page itself */
	buf += hdr_len + bd_len;
	buflen -= hdr_len + bd_len;

	/*
	 * Only the caching page (code 8) is accepted, and the page length
	 * must account for exactly the remaining bytes.
	 */
	if ((buf[0] != 8) || (buflen != ((uint32_t)buf[1] + 2))) {
		goto mode_sel_param_len_err;
	}

	/* No bit other than WCE (bit 2) may be set in byte 2 */
	if (buf[2] & 0xFB) {
		goto mode_sel_param_field_err;
	}

	/* All remaining page bytes must be zero */
	for (i = 3; i < (buf[1] + 2); i++) {
		if (buf[i]) {
			goto mode_sel_param_field_err;
		}
	}

	sret = SBD_SUCCESS;

	/* All good. Lets handle the write cache change, if any */
	if (buf[2] & BIT_2) {
		sret = sbd_wcd_set(0, sl);	/* WCE=1: enable write cache */
	} else {
		sret = sbd_wcd_set(1, sl);	/* WCE=0: disable write cache */
	}

	if (sret != SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	/* set on the device passed, now set the flags */
	mutex_enter(&sl->sl_lock);
	if (buf[2] & BIT_2) {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
	} else {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
	}

	/* Every other nexus must learn of the parameter change */
	for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
		if (it == task->task_lu_itl_handle)
			continue;
		it->sbd_it_ua_conditions |= SBD_UA_MODE_PARAMETERS_CHANGED;
	}

	/* SP bit set: also update and persist the saved setting */
	if (task->task_cdb[1] & 1) {
		if (buf[2] & BIT_2) {
			sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
		} else {
			sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
		}
		mutex_exit(&sl->sl_lock);
		sret = sbd_write_lu_info(sl);
	} else {
		mutex_exit(&sl->sl_lock);
	}
	if (sret == SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
	}
	return;

mode_sel_param_len_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_PARAM_LIST_LENGTH_ERROR);
	return;
mode_sel_param_field_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_INVALID_FIELD_IN_PARAM_LIST);
}
1091 
1092 /*
1093  * This function parse through a string, passed to it as a pointer to a string,
1094  * by adjusting the pointer to the first non-space character and returns
1095  * the count/length of the first bunch of non-space characters. Multiple
1096  * Management URLs are stored as a space delimited string in sl_mgmt_url
1097  * field of sbd_lu_t. This function is used to retrieve one url at a time.
1098  *
1099  * i/p : pointer to pointer to a url string
1100  * o/p : Adjust the pointer to the url to the first non white character
1101  *       and returns the length of the URL
1102  */
uint16_t
sbd_parse_mgmt_url(char **url_addr) {
	uint16_t url_length = 0;
	char *cp = *url_addr;

	/* Advance the caller's pointer past any leading whitespace. */
	while (*cp == ' ' || *cp == '\t' || *cp == '\n')
		cp++;
	*url_addr = cp;

	/* Count characters up to the next whitespace or end of string. */
	while (*cp != '\0' && *cp != ' ' && *cp != '\t' && *cp != '\n') {
		cp++;
		url_length++;
	}
	return (url_length);
}
1128 
/*
 * Handle the SCSI INQUIRY command.  Builds either the standard inquiry
 * data or one of the supported VPD pages: 0x00 (supported pages), 0x80
 * (unit serial number), 0x83 (device identification), 0x85 (management
 * network addresses, only when a management URL is configured) and 0x86
 * (extended inquiry data).  Any other page code, or a non-zero page
 * code without the EVPD bit, draws INVALID FIELD IN CDB.
 */
void
sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	uint8_t *p;
	uint8_t byte0;
	uint8_t page_length;
	uint16_t bsize = 512;
	uint16_t cmd_size;
	uint32_t xfer_size = 4;
	uint32_t mgmt_url_size = 0;
	char *mgmt_url = NULL;


	byte0 = DTYPE_DIRECT;
	/*
	 * Basic protocol checks.
	 */

	/* EVPD clear requires a zero page code; control byte (5) must be 0 */
	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Zero byte allocation length is not an error.  Just
	 * return success.
	 */

	/* Allocation length is in CDB bytes 3-4 */
	cmd_size = (((uint16_t)cdbp[3]) << 8) | cdbp[4];

	if (cmd_size == 0) {
		task->task_cmd_xfer_length = 0;
		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length = 0;
		}
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/*
	 * Standard inquiry
	 */

	if ((cdbp[1] & 1) == 0) {
		int	i;
		struct scsi_inquiry *inq;

		p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);
		inq = (struct scsi_inquiry *)p;

		page_length = 69;
		xfer_size = page_length + 5;

		inq->inq_dtype = DTYPE_DIRECT;
		inq->inq_ansi = 5;	/* SPC-3 */
		inq->inq_hisup = 1;
		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
		inq->inq_len = page_length;

		inq->inq_tpgs = TPGS_FAILOVER_IMPLICIT;
		inq->inq_cmdque = 1;

		/* Per-LU vendor/product/revision override the defaults */
		if (sl->sl_flags & SL_VID_VALID) {
			bcopy(sl->sl_vendor_id, inq->inq_vid, 8);
		} else {
			bcopy(sbd_vendor_id, inq->inq_vid, 8);
		}

		if (sl->sl_flags & SL_PID_VALID) {
			bcopy(sl->sl_product_id, inq->inq_pid, 16);
		} else {
			bcopy(sbd_product_id, inq->inq_pid, 16);
		}

		if (sl->sl_flags & SL_REV_VALID) {
			bcopy(sl->sl_revision, inq->inq_revision, 4);
		} else {
			bcopy(sbd_revision, inq->inq_revision, 4);
		}

		/* Adding Version Descriptors */
		i = 0;
		/* SAM-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x00;
		inq->inq_vd[i].inq_vd_lsb = 0x60;
		i++;

		/* transport */
		switch (task->task_lport->lport_id->protocol_id) {
		case PROTOCOL_FIBRE_CHANNEL:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x00;
			i++;
			break;

		case PROTOCOL_PARALLEL_SCSI:
		case PROTOCOL_SSA:
		case PROTOCOL_IEEE_1394:
			/* Currently no claims of conformance */
			break;

		case PROTOCOL_SRP:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x40;
			i++;
			break;

		case PROTOCOL_iSCSI:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x60;
			i++;
			break;

		case PROTOCOL_SAS:
		case PROTOCOL_ADT:
		case PROTOCOL_ATAPI:
		default:
			/* Currently no claims of conformance */
			break;
		}

		/* SPC-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x00;
		i++;

		/* SBC-2 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x20;

		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
		    min(cmd_size, xfer_size));
		kmem_free(p, bsize);

		return;
	}

	/* Held across the send below; dropped at the bottom of the function */
	rw_enter(&sbd_global_prop_lock, RW_READER);
	if (sl->sl_mgmt_url) {
		mgmt_url_size = strlen(sl->sl_mgmt_url);
		mgmt_url = sl->sl_mgmt_url;
	} else if (sbd_mgmt_url) {
		mgmt_url_size = strlen(sbd_mgmt_url);
		mgmt_url = sbd_mgmt_url;
	}

	/*
	 * EVPD handling
	 */

	/* Default 512 bytes may not be enough, increase bsize if necessary */
	if (cdbp[2] == 0x83 || cdbp[2] == 0x85) {
		if (bsize <  cmd_size)
			bsize = cmd_size;
	}
	p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);

	switch (cdbp[2]) {
	case 0x00:
		page_length = 4 + (mgmt_url_size ? 1 : 0);

		p[0] = byte0;
		p[3] = page_length;
		/* Supported VPD pages in ascending order */
		{
			uint8_t i = 5;

			/* p[4] is page 0x00 itself, already zero from zalloc */
			p[i++] = 0x80;
			p[i++] = 0x83;
			if (mgmt_url_size != 0)
				p[i++] = 0x85;
			p[i++] = 0x86;
		}
		xfer_size = page_length + 4;
		break;

	case 0x80:
		if (sl->sl_serial_no_size) {
			page_length = sl->sl_serial_no_size;
			bcopy(sl->sl_serial_no, p + 4, sl->sl_serial_no_size);
		} else {
			/* if no serial num is specified set 4 spaces */
			page_length = 4;
			bcopy("    ", p + 4, 4);
		}
		p[0] = byte0;
		p[1] = 0x80;
		p[3] = page_length;
		xfer_size = page_length + 4;
		break;

	case 0x83:
		xfer_size = stmf_scsilib_prepare_vpd_page83(task, p,
		    bsize, byte0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID);
		break;

	case 0x85:
		if (mgmt_url_size == 0) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			goto err_done;
		}
		{
			uint16_t idx, newidx, sz, url_size;
			char *url;

			p[0] = byte0;
			p[1] = 0x85;

			idx = 4;
			url = mgmt_url;
			url_size = sbd_parse_mgmt_url(&url);
			/* Creating Network Service Descriptors */
			while (url_size != 0) {
				/* Null terminated and 4 Byte aligned */
				sz = url_size + 1;
				sz += (sz % 4) ? 4 - (sz % 4) : 0;
				newidx = idx + sz + 4;

				if (newidx < bsize) {
					/*
					 * SPC-3r23 : Table 320  (Sec 7.6.5)
					 * (Network service descriptor format
					 *
					 * Note: Hard coding service type as
					 * "Storage Configuration Service".
					 */
					p[idx] = 1;
					SCSI_WRITE16(p + idx + 2, sz);
					bcopy(url, p + idx + 4, url_size);
					xfer_size = newidx + 4;
				}
				idx = newidx;

				/* skip to next mgmt url if any */
				url += url_size;
				url_size = sbd_parse_mgmt_url(&url);
			}

			/* Total descriptor length */
			SCSI_WRITE16(p + 2, idx - 4);
			break;
		}

	case 0x86:
		page_length = 0x3c;

		p[0] = byte0;
		p[1] = 0x86;		/* Page 86 response */
		p[3] = page_length;

		/*
		 * Bits 0, 1, and 2 will need to be updated
		 * to reflect the queue tag handling if/when
		 * that is implemented.  For now, we're going
		 * to claim support only for Simple TA.
		 */
		p[5] = 1;
		xfer_size = page_length + 4;
		break;

	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		goto err_done;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
	    min(cmd_size, xfer_size));
err_done:
	kmem_free(p, bsize);
	rw_exit(&sbd_global_prop_lock);
}
1407 
1408 stmf_status_t
1409 sbd_task_alloc(struct scsi_task *task)
1410 {
1411 	if ((task->task_lu_private =
1412 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
1413 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1414 		scmd->flags = 0;
1415 		return (STMF_SUCCESS);
1416 	}
1417 	return (STMF_ALLOC_FAILURE);
1418 }
1419 
1420 void
1421 sbd_remove_it_handle(sbd_lu_t *sl, sbd_it_data_t *it)
1422 {
1423 	sbd_it_data_t **ppit;
1424 
1425 	sbd_pgr_remove_it_handle(sl, it);
1426 	mutex_enter(&sl->sl_lock);
1427 	for (ppit = &sl->sl_it_list; *ppit != NULL;
1428 	    ppit = &((*ppit)->sbd_it_next)) {
1429 		if ((*ppit) == it) {
1430 			*ppit = it->sbd_it_next;
1431 			break;
1432 		}
1433 	}
1434 	mutex_exit(&sl->sl_lock);
1435 
1436 	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, sl->sl_lu,
1437 	    sbd_it_data_t *, it);
1438 
1439 	kmem_free(it, sizeof (*it));
1440 }
1441 
/*
 * Clear a SCSI-2 reservation on the LU.  If 'it' is NULL the holding
 * I_T nexus is looked up; if 'it' is supplied the reservation is only
 * cleared when that nexus actually holds it (hence "check and clear").
 */
void
sbd_check_and_clear_scsi2_reservation(sbd_lu_t *sl, sbd_it_data_t *it)
{
	mutex_enter(&sl->sl_lock);
	if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) == 0) {
		/* If we dont have any reservations, just get out. */
		mutex_exit(&sl->sl_lock);
		return;
	}

	if (it == NULL) {
		/* Find the I_T nexus which is holding the reservation. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
				ASSERT(it->sbd_it_session_id ==
				    sl->sl_rs_owner_session_id);
				break;
			}
		}
		ASSERT(it != NULL);
	} else {
		/*
		 * We were passed an I_T nexus. If this nexus does not hold
		 * the reservation, do nothing. This is why this function is
		 * called "check_and_clear".
		 */
		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
			mutex_exit(&sl->sl_lock);
			return;
		}
	}
	/* Clear both the per-nexus and the per-LU reservation flags */
	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
	sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
	mutex_exit(&sl->sl_lock);
}
1477 
1478 
1479 
/*
 * Main command entry point for the sbd LU provider.  Establishes the
 * per-I_T-nexus state on first use, runs task management functions,
 * enforces access-state, unit-attention and reservation rules in the
 * SAM-3 mandated order, proxies eligible commands while in standby,
 * and finally dispatches the CDB to the appropriate handler.
 */
void
sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	uint8_t cdb0, cdb1;
	stmf_status_t st_ret;

	/* First command on this I_T nexus: create and register its state */
	if ((it = task->task_lu_itl_handle) == NULL) {
		mutex_enter(&sl->sl_lock);
		/*
		 * If an entry for this session already exists, return BUSY
		 * rather than creating a duplicate.
		 */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_session_id ==
			    task->task_session->ss_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
				return;
			}
		}
		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
		if (it == NULL) {
			mutex_exit(&sl->sl_lock);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		it->sbd_it_session_id = task->task_session->ss_session_id;
		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
		it->sbd_it_next = sl->sl_it_list;
		sl->sl_it_list = it;
		mutex_exit(&sl->sl_lock);

		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);

		sbd_pgr_initialize_it(task);
		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
		    task->task_session, it->sbd_it_session_id, it)
		    != STMF_SUCCESS) {
			sbd_remove_it_handle(sl, it);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		task->task_lu_itl_handle = it;
		if (sl->sl_access_state != SBD_LU_STANDBY) {
			it->sbd_it_ua_conditions = SBD_UA_POR;
		}
	} else if (it->sbd_it_flags & SBD_IT_PGR_CHECK_FLAG) {
		sbd_pgr_initialize_it(task);
		mutex_enter(&sl->sl_lock);
		it->sbd_it_flags &= ~SBD_IT_PGR_CHECK_FLAG;
		mutex_exit(&sl->sl_lock);
	}

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	/*
	 * if we're transitioning between access
	 * states, return NOT READY
	 */
	if (sl->sl_access_state == SBD_LU_TRANSITION_TO_STANDBY ||
	    sl->sl_access_state == SBD_LU_TRANSITION_TO_ACTIVE) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
		return;
	}

	/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
			saa = STMF_SAA_POR;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Reservation conflict checks */
	if (sl->sl_access_state != SBD_LU_STANDBY) {
		if (SBD_PGR_RSVD(sl->sl_pgr)) {
			if (sbd_pgr_reservation_conflict(task)) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		} else if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) &&
		    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
			if (!(SCSI2_CONFLICT_FREE_CMDS(task->task_cdb))) {
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
	}

	/* Rest of the UA condition checks */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
			/* READ CAPACITY itself does not get the UA */
			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
			    (task->task_cdb[1] ==
			    SSVC_ACTION_READ_CAPACITY_G4))) {
				saa = 0;
			} else {
				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
			}
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_MODE_PARAMETERS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_MODE_PARAMETERS_CHANGED;
			saa = STMF_SAA_MODE_PARAMETERS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ASYMMETRIC_ACCESS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ASYMMETRIC_ACCESS_CHANGED;
			saa = STMF_SAA_ASYMMETRIC_ACCESS_CHANGED;
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_ACCESS_STATE_TRANSITION) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_ACCESS_STATE_TRANSITION;
			saa = STMF_SAA_LU_NO_ACCESS_TRANSITION;
		} else {
			it->sbd_it_ua_conditions = 0;
			saa = 0;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	/*
	 * In standby only a limited command set is serviced locally;
	 * everything else is either rejected or proxied to the peer.
	 */
	if (sl->sl_access_state == SBD_LU_STANDBY) {
		if (cdb0 != SCMD_INQUIRY &&
		    cdb0 != SCMD_MODE_SENSE &&
		    cdb0 != SCMD_MODE_SENSE_G1 &&
		    cdb0 != SCMD_MODE_SELECT &&
		    cdb0 != SCMD_MODE_SELECT_G1 &&
		    cdb0 != SCMD_RESERVE &&
		    cdb0 != SCMD_RELEASE &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_OUT &&
		    cdb0 != SCMD_PERSISTENT_RESERVE_IN &&
		    cdb0 != SCMD_REQUEST_SENSE &&
		    cdb0 != SCMD_READ_CAPACITY &&
		    !(cdb0 == SCMD_SVC_ACTION_IN_G4 &&
		    cdb1 == SSVC_ACTION_READ_CAPACITY_G4) &&
		    !(cdb0 == SCMD_MAINTENANCE_IN &&
		    (cdb1 & 0x1F) == 0x0A)) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_LU_NO_ACCESS_STANDBY);
			return;
		}

		/*
		 * is this a short write?
		 * if so, we'll need to wait until we have the buffer
		 * before proxying the command
		 */
		switch (cdb0) {
			case SCMD_MODE_SELECT:
			case SCMD_MODE_SELECT_G1:
			case SCMD_PERSISTENT_RESERVE_OUT:
				break;
			default:
				st_ret = stmf_proxy_scsi_cmd(task,
				    initial_dbuf);
				if (st_ret != STMF_SUCCESS) {
					stmf_scsilib_send_status(task,
					    STATUS_CHECK,
					    STMF_SAA_LU_NO_ACCESS_UNAVAIL);
				}
				return;
		}
	}

	/* Group code stripped: all READ/WRITE CDB sizes map to one value */
	cdb0 = task->task_cdb[0] & 0x1F;

	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
		if (cdb0 == SCMD_READ) {
			sbd_handle_read(task, initial_dbuf);
			return;
		}
		sbd_handle_write(task, initial_dbuf);
		return;
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
		sbd_handle_inquiry(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_OUT) {
		sbd_handle_pgr_out_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_IN) {
		sbd_handle_pgr_in_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_RELEASE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner don't release it, just return good */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
				return;
			}
		}
		sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_RESERVE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner, return conflict status */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
		sl->sl_flags |= SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
		sl->sl_rs_owner_session_id = it->sbd_it_session_id;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_REQUEST_SENSE) {
		/*
		 * LU provider needs to store unretrieved sense data
		 * (e.g. after power-on/reset).  For now, we'll just
		 * return good status with no sense.
		 */

		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
		    task->task_cdb[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}

		return;
	}

	/* Report Target Port Groups */
	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
	    ((cdb1 & 0x1F) == 0x0A)) {
		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
		task->task_cmd_xfer_length = 0;
		if (task->task_cdb[4] & 0xFC) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		if (task->task_cdb[4] & 2) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}
		return;

	}

	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
		uint8_t *p;
		p = kmem_zalloc(512, KM_SLEEP);
		sbd_handle_mode_sense(task, initial_dbuf, p);
		kmem_free(p, 512);
		return;
	}

	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
		sbd_handle_mode_select(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
		sbd_handle_read_capacity(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
			sbd_handle_read_capacity(task, initial_dbuf);
			return;
		/*
		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
		 * 	sbd_handle_read(task, initial_dbuf);
		 * 	return;
		 */
		}
	}

	/*
	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
	 *		 sbd_handle_write(task, initial_dbuf);
	 * 		return;
	 *	}
	 * }
	 */

	if (cdb0 == SCMD_VERIFY) {
		/*
		 * Something more likely needs to be done here.
		 */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
		sbd_handle_sync_cache(task, initial_dbuf);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
1855 
1856 void
1857 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1858 {
1859 	sbd_cmd_t *scmd = NULL;
1860 
1861 	scmd = (sbd_cmd_t *)task->task_lu_private;
1862 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1863 		return;
1864 
1865 	switch (scmd->cmd_type) {
1866 	case (SBD_CMD_SCSI_READ):
1867 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1868 		break;
1869 
1870 	case (SBD_CMD_SCSI_WRITE):
1871 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1872 		break;
1873 
1874 	case (SBD_CMD_SMALL_READ):
1875 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1876 		break;
1877 
1878 	case (SBD_CMD_SMALL_WRITE):
1879 		sbd_handle_short_write_xfer_completion(task, dbuf);
1880 		break;
1881 
1882 	default:
1883 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1884 		break;
1885 	}
1886 }
1887 
/*
 * Status-done callback.  sbd never requests a status completion
 * callback, so reaching this function indicates a framework bug.
 */
/* ARGSUSED */
void
sbd_send_status_done(struct scsi_task *task)
{
	cmn_err(CE_PANIC,
	    "sbd_send_status_done: this should not have been called");
}
1895 
1896 void
1897 sbd_task_free(struct scsi_task *task)
1898 {
1899 	if (task->task_lu_private) {
1900 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1901 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1902 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1903 			    (void *)task);
1904 		}
1905 		kmem_free(scmd, sizeof (sbd_cmd_t));
1906 	}
1907 }
1908 
1909 /*
1910  * Aborts are synchronus w.r.t. I/O AND
1911  * All the I/O which SBD does is synchronous AND
1912  * Everything within a task is single threaded.
1913  *   IT MEANS
1914  * If this function is called, we are doing nothing with this task
1915  * inside of sbd module.
1916  */
1917 /* ARGSUSED */
1918 stmf_status_t
1919 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1920 {
1921 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1922 	scsi_task_t *task;
1923 
1924 	if (abort_cmd == STMF_LU_RESET_STATE) {
1925 		return (sbd_lu_reset_state(lu));
1926 	}
1927 
1928 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1929 		sbd_check_and_clear_scsi2_reservation(sl, (sbd_it_data_t *)arg);
1930 		sbd_remove_it_handle(sl, (sbd_it_data_t *)arg);
1931 		return (STMF_SUCCESS);
1932 	}
1933 
1934 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1935 	task = (scsi_task_t *)arg;
1936 	if (task->task_lu_private) {
1937 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1938 
1939 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1940 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1941 			return (STMF_ABORT_SUCCESS);
1942 		}
1943 	}
1944 
1945 	return (STMF_NOT_FOUND);
1946 }
1947 
/*
 * State-change entry point for the LU (online/offline requests and
 * their acknowledgements).  Online/offline validate the current state,
 * flip sl_state and report completion back through stmf_ctl(); the ack
 * commands simply clear the not-acked flag.
 */
/* ARGSUSED */
void
sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
	stmf_change_status_t st;

	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
	    (cmd == STMF_CMD_LU_OFFLINE) ||
	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));

	st.st_completion_status = STMF_SUCCESS;
	st.st_additional_info = NULL;

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		/* Only a clean OFFLINE -> ONLINE transition succeeds */
		if (sl->sl_state == STMF_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (sl->sl_state != STMF_STATE_OFFLINE)
			st.st_completion_status = STMF_FAILURE;
		if (st.st_completion_status == STMF_SUCCESS) {
			sl->sl_state = STMF_STATE_ONLINE;
			sl->sl_state_not_acked = 1;
		}
		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
		break;

	case STMF_CMD_LU_OFFLINE:
		/* Only a clean ONLINE -> OFFLINE transition succeeds */
		if (sl->sl_state == STMF_STATE_OFFLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (sl->sl_state != STMF_STATE_ONLINE)
			st.st_completion_status = STMF_FAILURE;
		if (st.st_completion_status == STMF_SUCCESS) {
			/* Drop reservation state along with the LU */
			sl->sl_flags &= ~(SL_MEDIUM_REMOVAL_PREVENTED |
			    SL_LU_HAS_SCSI2_RESERVATION);
			sl->sl_state = STMF_STATE_OFFLINE;
			sl->sl_state_not_acked = 1;
			sbd_pgr_reset(sl);
		}
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
		break;

	case STMF_ACK_LU_ONLINE_COMPLETE:
		/* Fallthrough */
	case STMF_ACK_LU_OFFLINE_COMPLETE:
		sl->sl_state_not_acked = 0;
		break;

	}
}
1999 
/*
 * LU info entry point; this provider exports nothing through it.
 */
/* ARGSUSED */
stmf_status_t
sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
2007 
/*
 * Reset the LU to its saved state: restore the write-cache setting from
 * the saved flag (pushing it to the backing store when the LU is
 * active), reset persistent and SCSI-2 reservations, and deregister all
 * ITL handles.
 */
stmf_status_t
sbd_lu_reset_state(stmf_lu_t *lu)
{
	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;

	mutex_enter(&sl->sl_lock);
	if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
		mutex_exit(&sl->sl_lock);
		/* Apply to the backing store only when we own the media */
		if (sl->sl_access_state == SBD_LU_ACTIVE) {
			(void) sbd_wcd_set(1, sl);
		}
	} else {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
		mutex_exit(&sl->sl_lock);
		if (sl->sl_access_state == SBD_LU_ACTIVE) {
			(void) sbd_wcd_set(0, sl);
		}
	}
	sbd_pgr_reset(sl);
	sbd_check_and_clear_scsi2_reservation(sl, NULL);
	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
		return (STMF_FAILURE);
	}
	return (STMF_SUCCESS);
}
2034 
2035 sbd_status_t
2036 sbd_flush_data_cache(sbd_lu_t *sl, int fsync_done)
2037 {
2038 	int r = 0;
2039 	int ret;
2040 
2041 	if (fsync_done)
2042 		goto over_fsync;
2043 	if ((sl->sl_data_vtype == VREG) || (sl->sl_data_vtype == VBLK)) {
2044 		if (VOP_FSYNC(sl->sl_data_vp, FSYNC, kcred, NULL))
2045 			return (SBD_FAILURE);
2046 	}
2047 over_fsync:
2048 	if (((sl->sl_data_vtype == VCHR) || (sl->sl_data_vtype == VBLK)) &&
2049 	    ((sl->sl_flags & SL_NO_DATA_DKIOFLUSH) == 0)) {
2050 		ret = VOP_IOCTL(sl->sl_data_vp, DKIOCFLUSHWRITECACHE, NULL,
2051 		    FKIOCTL, kcred, &r, NULL);
2052 		if ((ret == ENOTTY) || (ret == ENOTSUP)) {
2053 			mutex_enter(&sl->sl_lock);
2054 			sl->sl_flags |= SL_NO_DATA_DKIOFLUSH;
2055 			mutex_exit(&sl->sl_lock);
2056 		} else if (ret != 0) {
2057 			return (SBD_FAILURE);
2058 		}
2059 	}
2060 
2061 	return (SBD_SUCCESS);
2062 }
2063 
2064 /* ARGSUSED */
2065 static void
2066 sbd_handle_sync_cache(struct scsi_task *task,
2067     struct stmf_data_buf *initial_dbuf)
2068 {
2069 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
2070 	uint64_t	lba, laddr;
2071 	sbd_status_t	sret;
2072 	uint32_t	len;
2073 	int		is_g4 = 0;
2074 	int		immed;
2075 
2076 	task->task_cmd_xfer_length = 0;
2077 	/*
2078 	 * Determine if this is a 10 or 16 byte CDB
2079 	 */
2080 
2081 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
2082 		is_g4 = 1;
2083 
2084 	/*
2085 	 * Determine other requested parameters
2086 	 *
2087 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
2088 	 * Do not support the IMMED bit.
2089 	 */
2090 
2091 	immed = (task->task_cdb[1] & 0x02);
2092 
2093 	if (immed) {
2094 		stmf_scsilib_send_status(task, STATUS_CHECK,
2095 		    STMF_SAA_INVALID_FIELD_IN_CDB);
2096 		return;
2097 	}
2098 
2099 	/*
2100 	 * Check to be sure we're not being asked to sync an LBA
2101 	 * that is out of range.  While checking, verify reserved fields.
2102 	 */
2103 
2104 	if (is_g4) {
2105 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
2106 		    task->task_cdb[15]) {
2107 			stmf_scsilib_send_status(task, STATUS_CHECK,
2108 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2109 			return;
2110 		}
2111 
2112 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
2113 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
2114 	} else {
2115 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
2116 		    task->task_cdb[9]) {
2117 			stmf_scsilib_send_status(task, STATUS_CHECK,
2118 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2119 			return;
2120 		}
2121 
2122 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
2123 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
2124 	}
2125 
2126 	laddr = lba << sl->sl_data_blocksize_shift;
2127 	len <<= sl->sl_data_blocksize_shift;
2128 
2129 	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
2130 		stmf_scsilib_send_status(task, STATUS_CHECK,
2131 		    STMF_SAA_LBA_OUT_OF_RANGE);
2132 		return;
2133 	}
2134 
2135 	sret = sbd_flush_data_cache(sl, 0);
2136 	if (sret != SBD_SUCCESS) {
2137 		stmf_scsilib_send_status(task, STATUS_CHECK,
2138 		    STMF_SAA_WRITE_ERROR);
2139 		return;
2140 	}
2141 
2142 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
2143 }
2144