xref: /illumos-gate/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_scsi.c (revision 581cede61ac9c14d8d4ea452562a567189eead78)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 #include <sys/sdt.h>
38 #include <sys/dkio.h>
39 
40 #include <stmf.h>
41 #include <lpif.h>
42 #include <portif.h>
43 #include <stmf_ioctl.h>
44 #include <stmf_sbd.h>
45 #include <stmf_sbd_ioctl.h>
46 #include <sbd_impl.h>
47 
/*
 * Evaluates to nonzero if the command in 'cdb' may execute even while
 * another initiator holds a SCSI-2 RESERVE/RELEASE reservation, i.e.
 * it never produces a reservation conflict (per the SPC/SBC tables
 * cited below).
 */
#define	SCSI2_CONFLICT_FREE_CMDS(cdb)	( \
	/* ----------------------- */                                      \
	/* Refer Both		   */                                      \
	/* SPC-2 (rev 20) Table 10 */                                      \
	/* SPC-3 (rev 23) Table 31 */                                      \
	/* ----------------------- */                                      \
	((cdb[0]) == SCMD_INQUIRY)					|| \
	((cdb[0]) == SCMD_LOG_SENSE_G1)					|| \
	((cdb[0]) == SCMD_RELEASE)					|| \
	((cdb[0]) == SCMD_RELEASE_G1)					|| \
	((cdb[0]) == SCMD_REPORT_LUNS)					|| \
	((cdb[0]) == SCMD_REQUEST_SENSE)				|| \
	/* PREVENT ALLOW MEDIUM REMOVAL with prevent == 0 */               \
	((((cdb[0]) == SCMD_DOORLOCK) && (((cdb[4]) & 0x3) == 0)))	|| \
	/* SERVICE ACTION IN with READ MEDIA SERIAL NUMBER (0x01) */       \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G5) && (                          \
	    ((cdb[1]) & 0x1F) == 0x01))					|| \
	/* MAINTENANCE IN with service actions REPORT ALIASES (0x0Bh) */   \
	/* REPORT DEVICE IDENTIFIER (0x05)  REPORT PRIORITY (0x0Eh) */     \
	/* REPORT TARGET PORT GROUPS (0x0A) REPORT TIMESTAMP (0x0F) */     \
	(((cdb[0]) == SCMD_MAINTENANCE_IN) && (                            \
	    (((cdb[1]) & 0x1F) == 0x0B) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x05) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0E) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0A) ||                                 \
	    (((cdb[1]) & 0x1F) == 0x0F)))				|| \
	/* ----------------------- */                                      \
	/* SBC-3 (rev 17) Table 3  */                                      \
	/* ----------------------- */                                      \
	/* READ CAPACITY(10) */                                            \
	((cdb[0]) == SCMD_READ_CAPACITY)				|| \
	/* READ CAPACITY(16) */                                            \
	(((cdb[0]) == SCMD_SVC_ACTION_IN_G4) && (                          \
	    ((cdb[1]) & 0x1F) == 0x10))					|| \
	/* START STOP UNIT with START bit 0 and POWER CONDITION 0  */      \
	(((cdb[0]) == SCMD_START_STOP) && (                                \
	    (((cdb[4]) & 0xF0) == 0) && (((cdb[4]) & 0x01) == 0))))
/* End of SCSI2_CONFLICT_FREE_CMDS */
86 
87 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
88 static void sbd_handle_sync_cache(struct scsi_task *task,
89     struct stmf_data_buf *initial_dbuf);
90 void sbd_handle_read_xfer_completion(struct scsi_task *task,
91     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
92 void sbd_handle_short_write_xfer_completion(scsi_task_t *task,
93     stmf_data_buf_t *dbuf);
94 void sbd_handle_short_write_transfers(scsi_task_t *task,
95     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size);
96 static void sbd_handle_sync_cache(struct scsi_task *task,
97     struct stmf_data_buf *initial_dbuf);
98 void sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf,
99     uint32_t buflen);
100 void sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf);
101 
102 extern void sbd_pgr_initialize_it(scsi_task_t *);
103 extern int sbd_pgr_reservation_conflict(scsi_task_t *);
104 extern void sbd_pgr_remove_it_handle(sbd_lu_t *, sbd_it_data_t *);
105 extern void sbd_handle_pgr_in_cmd(scsi_task_t *, stmf_data_buf_t *);
106 extern void sbd_handle_pgr_out_cmd(scsi_task_t *, stmf_data_buf_t *);
107 extern void sbd_handle_pgr_out_data(scsi_task_t *, stmf_data_buf_t *);
108 /*
109  * IMPORTANT NOTE:
110  * =================
111  * The whole world here is based on the assumption that everything within
112  * a scsi task executes in a single threaded manner, even the aborts.
113  * Dont ever change that. There wont be any performance gain but there
114  * will be tons of race conditions.
115  */
116 
/*
 * Read the next chunk of data for a SCSI READ command from the backing
 * store into 'dbuf' and queue the buffer for transfer to the remote
 * port.  While more data remains and we are under our self-imposed
 * buffer budget, allocate further dbufs and recurse so that several
 * transfers stay in flight at once.  Completion is driven through
 * sbd_handle_read_xfer_completion().
 */
void
sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
					struct stmf_data_buf *dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;		/* byte offset into the LU for this chunk */
	uint32_t len, buflen, iolen;
	int ndx;
	int bufs_to_take;

	/* Lets try not to hog all the buffers the port has. */
	bufs_to_take = ((task->task_max_nbufs > 2) &&
	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
	    task->task_max_nbufs;

	/* This chunk is the smaller of what remains and what dbuf holds. */
	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
	laddr = scmd->addr + scmd->current_ro;

	/* Fill each scatter/gather segment from the backing store. */
	for (buflen = 0, ndx = 0; (buflen < len) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_read(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			/* Do not need to do xfer anymore, just complete it */
			dbuf->db_data_size = 0;
			dbuf->db_xfer_status = STMF_SUCCESS;
			sbd_handle_read_xfer_completion(task, scmd, dbuf);
			return;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	dbuf->db_relative_offset = scmd->current_ro;
	dbuf->db_data_size = buflen;
	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	/* Account for the queued chunk before considering another buffer. */
	scmd->len -= buflen;
	scmd->current_ro += buflen;
	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
		uint32_t maxsize, minsize, old_minsize;

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			/*
			 * A bad port implementation can keep on failing the
			 * request but keep on sending us a false
			 * minsize.
			 */
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* No more buffers; outstanding xfers will finish. */
			return;
		}
		scmd->nbufs++;
		sbd_do_read_xfer(task, scmd, dbuf);
	}
}
180 
/*
 * Called when one dbuf queued by sbd_do_read_xfer() has finished its
 * transfer to the remote port.  Sends final SCSI status once every
 * in-flight buffer has drained, or keeps the pipeline going by
 * issuing the next chunk.
 */
void
sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
				struct stmf_data_buf *dbuf)
{
	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	/* All data queued (or a read failed)?  Then wind the command down. */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE) {
		/* allocate new dbuf */
		uint32_t maxsize, minsize, old_minsize;
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if this was the last buffer in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_read_xfer(task, scmd, dbuf);
}
227 
/*
 * Entry point for READ(6), READ(10), READ(12) and READ(16).  Decodes
 * the LBA and transfer length from the CDB, validates the range
 * against the LU size, then either satisfies the whole read with a
 * single transfer (fast path) or sets up an sbd_cmd_t and streams the
 * data chunk by chunk via sbd_do_read_xfer().
 */
void
sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0];
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;
	int fast_path;

	/* Decode LBA and length; the layout differs per CDB group. */
	if (op == SCMD_READ) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		if (len == 0) {
			/* READ(6): a zero transfer length means 256 blocks */
			len = 256;
		}
	} else if (op == SCMD_READ_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_READ_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_READ_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to a byte offset and byte count. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/*
	 * Fast path only applies when the initiator expects exactly what
	 * the CDB asks for; otherwise clip to the expected length.
	 */
	if (len != task->task_expected_xfer_length) {
		fast_path = 0;
		len = (len > task->task_expected_xfer_length) ?
		    task->task_expected_xfer_length : len;
	} else {
		fast_path = 1;
	}

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		/* Shrink the request if the port is short on buffer space. */
		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
	}
	dbuf = initial_dbuf;

	/* Whole read fits in one contiguous buffer: finish in one shot. */
	if ((dbuf->db_buf_size >= len) && fast_path &&
	    (dbuf->db_sglist_length == 1)) {
		if (sbd_data_read(sl, laddr, (uint64_t)len,
		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
			dbuf->db_relative_offset = 0;
			dbuf->db_data_size = len;
			dbuf->db_flags = DB_SEND_STATUS_GOOD |
			    DB_DIRECTION_TO_RPORT;
			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
		} else {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_READ_ERROR);
		}
		return;
	}

	/* Slow path: track progress in an sbd_cmd_t across many xfers. */
	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_READ;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	sbd_do_read_xfer(task, scmd, dbuf);
}
337 
338 void
339 sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
340 					struct stmf_data_buf *dbuf)
341 {
342 	uint32_t len;
343 	int bufs_to_take;
344 
345 	/* Lets try not to hog all the buffers the port has. */
346 	bufs_to_take = ((task->task_max_nbufs > 2) &&
347 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
348 	    task->task_max_nbufs;
349 
350 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
351 
352 	dbuf->db_relative_offset = scmd->current_ro;
353 	dbuf->db_data_size = len;
354 	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
355 	(void) stmf_xfer_data(task, dbuf, 0);
356 	scmd->len -= len;
357 	scmd->current_ro += len;
358 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
359 		uint32_t maxsize, minsize, old_minsize;
360 
361 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
362 		minsize = maxsize >> 2;
363 		do {
364 			old_minsize = minsize;
365 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
366 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
367 		    (minsize >= 512));
368 		if (dbuf == NULL) {
369 			return;
370 		}
371 		scmd->nbufs++;
372 		sbd_do_write_xfer(task, scmd, dbuf);
373 	}
374 }
375 
/*
 * Completion callback for one data-in transfer of a WRITE command.
 * Commits the received data to the backing store, then either finishes
 * the command (once every buffer has drained), requests the next
 * chunk, or aborts on a transport error.  'dbuf_reusable' is zero when
 * the buffer (e.g. an initial-burst immediate-data buffer) must not be
 * reused for further transfers.
 */
void
sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
    struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint64_t laddr;
	uint32_t buflen, iolen;
	int ndx;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}

	/* A previous chunk already failed; just drain the buffers. */
	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		goto WRITE_XFER_DONE;
	}

	laddr = scmd->addr + dbuf->db_relative_offset;

	/* Write each scatter/gather segment that carries received data. */
	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
	    (ndx < dbuf->db_sglist_length); ndx++) {
		iolen = min(dbuf->db_data_size - buflen,
		    dbuf->db_sglist[ndx].seg_length);
		if (iolen == 0)
			break;
		if (sbd_data_write(sl, laddr, (uint64_t)iolen,
		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
			break;
		}
		buflen += iolen;
		laddr += (uint64_t)iolen;
	}
	task->task_nbytes_transferred += buflen;
WRITE_XFER_DONE:
	/* All data received (or a write failed)?  Wind the command down. */
	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
		stmf_free_dbuf(task, dbuf);
		scmd->nbufs--;
		if (scmd->nbufs)
			return;	/* wait for all buffers to complete */
		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_WRITE_ERROR);
		else
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (dbuf->db_flags & DB_DONT_REUSE || dbuf_reusable == 0) {
		uint32_t maxsize, minsize, old_minsize;
		/* free current dbuf and allocate a new one */
		stmf_free_dbuf(task, dbuf);

		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
		} while ((dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (dbuf == NULL) {
			/* Abort only if this was the last buffer in flight. */
			scmd->nbufs --;
			if (scmd->nbufs == 0) {
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ALLOC_FAILURE, NULL);
			}
			return;
		}
	}
	sbd_do_write_xfer(task, scmd, dbuf);
}
449 
/*
 * Entry point for WRITE(6), WRITE(10), WRITE(12) and WRITE(16).
 * Decodes the LBA and transfer length from the CDB, validates the
 * range and write-protect state, then sets up an sbd_cmd_t and starts
 * pulling data from the port.  If the port delivered immediate
 * (initial-burst) data, that buffer is consumed first via the write
 * completion path.
 */
void
sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	uint64_t lba, laddr;
	uint32_t len;
	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_cmd_t *scmd;
	stmf_data_buf_t *dbuf;

	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_PROTECTED);
		return;
	}
	/* Decode LBA and length; the layout differs per CDB group. */
	if (op == SCMD_WRITE) {
		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
		len = (uint32_t)task->task_cdb[4];

		if (len == 0) {
			/* WRITE(6): a zero transfer length means 256 blocks */
			len = 256;
		}
	} else if (op == SCMD_WRITE_G1) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
	} else if (op == SCMD_WRITE_G5) {
		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
	} else if (op == SCMD_WRITE_G4) {
		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_OPCODE);
		return;
	}

	/* Convert blocks to a byte offset and byte count. */
	laddr = lba << sl->sl_data_blocksize_shift;
	len <<= sl->sl_data_blocksize_shift;

	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_LBA_OUT_OF_RANGE);
		return;
	}

	task->task_cmd_xfer_length = len;
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length = len;
	}

	/* Never transfer more than the initiator expects. */
	len = (len > task->task_expected_xfer_length) ?
	    task->task_expected_xfer_length : len;

	if (len == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (initial_dbuf == NULL) {
		uint32_t maxsize, minsize, old_minsize;

		/* Shrink the request if the port is short on buffer space. */
		maxsize = (len > (128*1024)) ? 128*1024 : len;
		minsize = maxsize >> 2;
		do {
			old_minsize = minsize;
			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
			    &minsize, 0);
		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
		    (minsize >= 512));
		if (initial_dbuf == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
	} else if (task->task_flags & TF_INITIAL_BURST) {
		/* Port already delivered some (or all) of the data. */
		if (initial_dbuf->db_data_size > len) {
			if (initial_dbuf->db_data_size >
			    task->task_expected_xfer_length) {
				/* protocol error */
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_INVALID_ARG, NULL);
				return;
			}
			initial_dbuf->db_data_size = len;
		}
		do_immediate_data = 1;
	}
	dbuf = initial_dbuf;

	if (task->task_lu_private) {
		scmd = (sbd_cmd_t *)task->task_lu_private;
	} else {
		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
		task->task_lu_private = scmd;
	}
	scmd->flags = SBD_SCSI_CMD_ACTIVE;
	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
	scmd->nbufs = 1;
	scmd->addr = laddr;
	scmd->len = len;
	scmd->current_ro = 0;

	if (do_immediate_data) {
		/*
		 * Account for the immediate data and push it straight
		 * through the completion path (dbuf_reusable == 0).
		 */
		scmd->len -= dbuf->db_data_size;
		scmd->current_ro += dbuf->db_data_size;
		dbuf->db_xfer_status = STMF_SUCCESS;
		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
	} else {
		sbd_do_write_xfer(task, scmd, dbuf);
	}
}
562 
563 /*
564  * Utility routine to handle small non performance data transfers to the
565  * initiators. dbuf is an initial data buf (if any), 'p' points to a data
566  * buffer which is source of data for transfer, cdb_xfer_size is the
567  * transfer size based on CDB, cmd_xfer_size is the actual amount of data
568  * which this command would transfer (the size of data pointed to by 'p').
569  */
570 void
571 sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
572     uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
573 {
574 	uint32_t bufsize, ndx;
575 	sbd_cmd_t *scmd;
576 
577 	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);
578 
579 	task->task_cmd_xfer_length = cmd_xfer_size;
580 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
581 		task->task_expected_xfer_length = cmd_xfer_size;
582 	} else {
583 		cmd_xfer_size = min(cmd_xfer_size,
584 		    task->task_expected_xfer_length);
585 	}
586 
587 	if (cmd_xfer_size == 0) {
588 		stmf_scsilib_send_status(task, STATUS_CHECK,
589 		    STMF_SAA_INVALID_FIELD_IN_CDB);
590 		return;
591 	}
592 	if (dbuf == NULL) {
593 		uint32_t minsize = cmd_xfer_size;
594 
595 		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
596 	}
597 	if (dbuf == NULL) {
598 		stmf_scsilib_send_status(task, STATUS_QFULL, 0);
599 		return;
600 	}
601 
602 	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
603 		uint8_t *d;
604 		uint32_t s;
605 
606 		d = dbuf->db_sglist[ndx].seg_addr;
607 		s = min((cmd_xfer_size - bufsize),
608 		    dbuf->db_sglist[ndx].seg_length);
609 		bcopy(p+bufsize, d, s);
610 		bufsize += s;
611 	}
612 	dbuf->db_relative_offset = 0;
613 	dbuf->db_data_size = cmd_xfer_size;
614 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
615 
616 	if (task->task_lu_private == NULL) {
617 		task->task_lu_private =
618 		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
619 	}
620 	scmd = (sbd_cmd_t *)task->task_lu_private;
621 
622 	scmd->cmd_type = SBD_CMD_SMALL_READ;
623 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
624 	(void) stmf_xfer_data(task, dbuf, 0);
625 }
626 
627 void
628 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
629 				struct stmf_data_buf *dbuf)
630 {
631 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
632 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
633 		    dbuf->db_xfer_status, NULL);
634 		return;
635 	}
636 	task->task_nbytes_transferred = dbuf->db_data_size;
637 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
638 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
639 }
640 
641 void
642 sbd_handle_short_write_transfers(scsi_task_t *task,
643     stmf_data_buf_t *dbuf, uint32_t cdb_xfer_size)
644 {
645 	sbd_cmd_t *scmd;
646 
647 	task->task_cmd_xfer_length = cdb_xfer_size;
648 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
649 		task->task_expected_xfer_length = cdb_xfer_size;
650 	} else {
651 		cdb_xfer_size = min(cdb_xfer_size,
652 		    task->task_expected_xfer_length);
653 	}
654 
655 	if (cdb_xfer_size == 0) {
656 		stmf_scsilib_send_status(task, STATUS_CHECK,
657 		    STMF_SAA_INVALID_FIELD_IN_CDB);
658 		return;
659 	}
660 	if (task->task_lu_private == NULL) {
661 		task->task_lu_private = kmem_zalloc(sizeof (sbd_cmd_t),
662 		    KM_SLEEP);
663 	} else {
664 		bzero(task->task_lu_private, sizeof (sbd_cmd_t));
665 	}
666 	scmd = (sbd_cmd_t *)task->task_lu_private;
667 	scmd->cmd_type = SBD_CMD_SMALL_WRITE;
668 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
669 	scmd->len = cdb_xfer_size;
670 	if (dbuf == NULL) {
671 		uint32_t minsize = cdb_xfer_size;
672 
673 		dbuf = stmf_alloc_dbuf(task, cdb_xfer_size, &minsize, 0);
674 		if (dbuf == NULL) {
675 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
676 			    STMF_ALLOC_FAILURE, NULL);
677 			return;
678 		}
679 		dbuf->db_data_size = cdb_xfer_size;
680 		dbuf->db_relative_offset = 0;
681 		dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
682 		stmf_xfer_data(task, dbuf, 0);
683 	} else {
684 		if (dbuf->db_data_size < cdb_xfer_size) {
685 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
686 			    STMF_ABORTED, NULL);
687 			return;
688 		}
689 		dbuf->db_data_size = cdb_xfer_size;
690 		sbd_handle_short_write_xfer_completion(task, dbuf);
691 	}
692 }
693 
694 void
695 sbd_handle_short_write_xfer_completion(scsi_task_t *task,
696     stmf_data_buf_t *dbuf)
697 {
698 	sbd_cmd_t *scmd;
699 
700 	/*
701 	 * For now lets assume we will get only one sglist element
702 	 * for short writes. If that ever changes, we should allocate
703 	 * a local buffer and copy all the sg elements to one linear space.
704 	 */
705 	if ((dbuf->db_xfer_status != STMF_SUCCESS) ||
706 	    (dbuf->db_sglist_length > 1)) {
707 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
708 		    dbuf->db_xfer_status, NULL);
709 		return;
710 	}
711 
712 	task->task_nbytes_transferred = dbuf->db_data_size;
713 	scmd = (sbd_cmd_t *)task->task_lu_private;
714 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
715 
716 	/* Lets find out who to call */
717 	switch (task->task_cdb[0]) {
718 	case SCMD_MODE_SELECT:
719 	case SCMD_MODE_SELECT_G1:
720 		sbd_handle_mode_select_xfer(task,
721 		    dbuf->db_sglist[0].seg_addr, dbuf->db_data_size);
722 		break;
723 	case SCMD_PERSISTENT_RESERVE_OUT:
724 		sbd_handle_pgr_out_data(task, dbuf);
725 		break;
726 	default:
727 		/* This should never happen */
728 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
729 		    STMF_ABORTED, NULL);
730 	}
731 }
732 
733 void
734 sbd_handle_read_capacity(struct scsi_task *task,
735     struct stmf_data_buf *initial_dbuf)
736 {
737 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
738 	uint32_t cdb_len;
739 	uint8_t p[32];
740 	uint64_t s;
741 	uint16_t blksize;
742 
743 	s = sl->sl_lu_size >> sl->sl_data_blocksize_shift;
744 	s--;
745 	blksize = ((uint16_t)1) << sl->sl_data_blocksize_shift;
746 
747 	switch (task->task_cdb[0]) {
748 	case SCMD_READ_CAPACITY:
749 		if (s & 0xffffffff00000000ull) {
750 			p[0] = p[1] = p[2] = p[3] = 0xFF;
751 		} else {
752 			p[0] = (s >> 24) & 0xff;
753 			p[1] = (s >> 16) & 0xff;
754 			p[2] = (s >> 8) & 0xff;
755 			p[3] = s & 0xff;
756 		}
757 		p[4] = 0; p[5] = 0;
758 		p[6] = (blksize >> 8) & 0xff;
759 		p[7] = blksize & 0xff;
760 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
761 		break;
762 
763 	case SCMD_SVC_ACTION_IN_G4:
764 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
765 		bzero(p, 32);
766 		p[0] = (s >> 56) & 0xff;
767 		p[1] = (s >> 48) & 0xff;
768 		p[2] = (s >> 40) & 0xff;
769 		p[3] = (s >> 32) & 0xff;
770 		p[4] = (s >> 24) & 0xff;
771 		p[5] = (s >> 16) & 0xff;
772 		p[6] = (s >> 8) & 0xff;
773 		p[7] = s & 0xff;
774 		p[10] = (blksize >> 8) & 0xff;
775 		p[11] = blksize & 0xff;
776 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
777 		    cdb_len, 32);
778 		break;
779 	}
780 }
781 
/*
 * Derive a synthetic CHS geometry for an LU of 's' bytes with
 * 'blksize'-byte blocks: LUs under 4GB report 32 sectors / 8 heads,
 * larger ones 254 / 254, and the cylinder count is whatever makes the
 * capacity fit.
 */
void
sbd_calc_geometry(uint64_t s, uint16_t blksize, uint8_t *nsectors,
    uint8_t *nheads, uint32_t *ncyl)
{
	const uint64_t four_gb = 4ull * 1024ull * 1024ull * 1024ull;

	if (s < four_gb) {
		*nsectors = 32;
		*nheads = 8;
	} else {
		*nsectors = 254;
		*nheads = 254;
	}
	*ncyl = s / ((uint64_t)blksize * (uint64_t)(*nsectors) *
	    (uint64_t)(*nheads));
}
796 
/*
 * MODE SENSE(6) and MODE SENSE(10).  Builds the mode parameter header
 * plus the requested page(s) -- format (0x03), rigid disk geometry
 * (0x04), caching (0x08) and control mode (0x0A) are supported -- into
 * 'buf' and returns them via a short read transfer.  'buf' must be
 * zeroed and large enough for all pages.
 */
void
sbd_handle_mode_sense(struct scsi_task *task,
    struct stmf_data_buf *initial_dbuf, uint8_t *buf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint32_t cmd_size, n;
	uint8_t *cdb;
	uint32_t ncyl;
	uint8_t nsectors, nheads;
	uint8_t page, ctrl, header_size, pc_valid;
	uint16_t nbytes;
	uint8_t *p;
	uint64_t s = sl->sl_lu_size;
	uint32_t dev_spec_param_offset;

	p = buf;	/* buf is assumed to be zeroed out and large enough */
	n = 0;
	cdb = &task->task_cdb[0];
	/* Page code and PC (0=current, 1=changeable, else saved values). */
	page = cdb[2] & 0x3F;
	ctrl = (cdb[2] >> 6) & 3;
	cmd_size = (cdb[0] == SCMD_MODE_SENSE) ? cdb[4] :
	    READ_SCSI16(&cdb[7], uint32_t);

	/* The 6- and 10-byte CDBs use different mode header layouts. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		header_size = 4;
		dev_spec_param_offset = 2;
	} else {
		header_size = 8;
		dev_spec_param_offset = 3;
	}

	/* Now validate the command */
	if ((cdb[2] == 0) || (page == MODEPAGE_ALLPAGES) || (page == 0x08) ||
	    (page == 0x0A) || (page == 0x03) || (page == 0x04)) {
		pc_valid = 1;
	} else {
		pc_valid = 0;
	}
	if ((cmd_size < header_size) || (pc_valid == 0)) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/* We will update the length in the mode header at the end */

	/* Block dev device specific param in mode param header has wp bit */
	if (sl->sl_flags & SL_WRITE_PROTECTED) {
		p[n + dev_spec_param_offset] = BIT_7;
	}
	n += header_size;
	/* We are not going to return any block descriptor */

	nbytes = ((uint16_t)1) << sl->sl_data_blocksize_shift;
	sbd_calc_geometry(s, nbytes, &nsectors, &nheads, &ncyl);

	/* Format device page (0x03): sectors/track and bytes/sector. */
	if ((page == 0x03) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x03;
		p[n+1] = 0x16;
		if (ctrl != 1) {
			p[n + 11] = nsectors;
			p[n + 12] = nbytes >> 8;
			p[n + 13] = nbytes & 0xff;
			p[n + 20] = 0x80;
		}
		n += 24;
	}
	/* Rigid disk geometry page (0x04): cylinders and heads. */
	if ((page == 0x04) || (page == MODEPAGE_ALLPAGES)) {
		p[n] = 0x04;
		p[n + 1] = 0x16;
		if (ctrl != 1) {
			p[n + 2] = ncyl >> 16;
			p[n + 3] = ncyl >> 8;
			p[n + 4] = ncyl & 0xff;
			p[n + 5] = nheads;
			p[n + 20] = 0x15;
			p[n + 21] = 0x18;
		}
		n += 24;
	}
	/* Caching page (0x08): report WCE per the requested PC value. */
	if ((page == MODEPAGE_CACHING) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_caching *mode_caching_page;

		mode_caching_page = (struct mode_caching *)&p[n];

		mode_caching_page->mode_page.code = MODEPAGE_CACHING;
		mode_caching_page->mode_page.ps = 1; /* A saveable page */
		mode_caching_page->mode_page.length = 0x12;

		switch (ctrl) {
		case (0):
			/* Current */
			if ((sl->sl_flags & SL_WRITEBACK_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		case (1):
			/* Changeable */
			if ((sl->sl_flags &
			    SL_WRITEBACK_CACHE_SET_UNSUPPORTED) == 0) {
				mode_caching_page->wce = 1;
			}
			break;

		default:
			/* Default/saved values */
			if ((sl->sl_flags &
			    SL_SAVED_WRITE_CACHE_DISABLE) == 0) {
				mode_caching_page->wce = 1;
			}
			break;
		}
		n += (sizeof (struct mode_page) +
		    mode_caching_page->mode_page.length);
	}
	/* Control mode page (0x0A). */
	if ((page == MODEPAGE_CTRL_MODE) || (page == MODEPAGE_ALLPAGES)) {
		struct mode_control_scsi3 *mode_control_page;

		mode_control_page = (struct mode_control_scsi3 *)&p[n];

		mode_control_page->mode_page.code = MODEPAGE_CTRL_MODE;
		mode_control_page->mode_page.length =
		    PAGELENGTH_MODE_CONTROL_SCSI3;
		if (ctrl != 1) {
			/* If not looking for changeable values, report this. */
			mode_control_page->que_mod = CTRL_QMOD_UNRESTRICT;
		}
		n += (sizeof (struct mode_page) +
		    mode_control_page->mode_page.length);
	}

	/* Fill in the MODE DATA LENGTH field of the mode header. */
	if (cdb[0] == SCMD_MODE_SENSE) {
		if (n > 255) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/*
		 * Mode parameter header length doesn't include the number
		 * of bytes in the length field, so adjust the count.
		 * Byte count minus header length field size.
		 */
		buf[0] = (n - 1) & 0xff;
	} else {
		/* Byte count minus header length field size. */
		buf[1] = (n - 2) & 0xff;
		buf[0] = ((n - 2) >> 8) & 0xff;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, buf,
	    cmd_size, n);
}
949 
950 void
951 sbd_handle_mode_select(scsi_task_t *task, stmf_data_buf_t *dbuf)
952 {
953 	uint32_t cmd_xfer_len;
954 
955 	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
956 		cmd_xfer_len = (uint32_t)task->task_cdb[4];
957 	} else {
958 		cmd_xfer_len = READ_SCSI16(&task->task_cdb[7], uint32_t);
959 	}
960 
961 	if ((task->task_cdb[1] & 0xFE) != 0x10) {
962 		stmf_scsilib_send_status(task, STATUS_CHECK,
963 		    STMF_SAA_INVALID_FIELD_IN_CDB);
964 		return;
965 	}
966 
967 	if (cmd_xfer_len == 0) {
968 		/* zero byte mode selects are allowed */
969 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
970 		return;
971 	}
972 
973 	sbd_handle_short_write_transfers(task, dbuf, cmd_xfer_len);
974 }
975 
/*
 * Process a received MODE SELECT parameter list.  The only page this
 * LU accepts is the Caching mode page (0x08), and within it the only
 * changeable field is the WCE bit (BIT_2 of the first page byte).  On
 * a valid request the write-cache setting is applied to the backing
 * device, sl_flags are updated, MODE PARAMETERS CHANGED unit
 * attentions are raised on all other I_T nexuses, and when the SP bit
 * of the CDB was set the new value is persisted via
 * sbd_write_lu_info().
 */
void
sbd_handle_mode_select_xfer(scsi_task_t *task, uint8_t *buf, uint32_t buflen)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	int hdr_len, bd_len;
	sbd_status_t sret;
	int i;

	/* Mode parameter header is 4 bytes (MODE SELECT 6) or 8 (10). */
	if (task->task_cdb[0] == SCMD_MODE_SELECT) {
		hdr_len = 4;
	} else {
		hdr_len = 8;
	}

	if (buflen < hdr_len)
		goto mode_sel_param_len_err;

	/* Block descriptor length lives at different offsets per header. */
	bd_len = hdr_len == 4 ? buf[3] : READ_SCSI16(&buf[6], int);

	/* Need at least the 2-byte mode page header past the descriptors. */
	if (buflen < (hdr_len + bd_len + 2))
		goto mode_sel_param_len_err;

	/* Skip header and block descriptors; buf now points at the page. */
	buf += hdr_len + bd_len;
	buflen -= hdr_len + bd_len;

	/* Only the Caching page (code 8) with a matching length is valid. */
	if ((buf[0] != 8) || (buflen != ((uint32_t)buf[1] + 2))) {
		goto mode_sel_param_len_err;
	}

	/* In byte 2 of the caching page only WCE (BIT_2) may be set. */
	if (buf[2] & 0xFB) {
		goto mode_sel_param_field_err;
	}

	/* All remaining page bytes must be zero (nothing else changeable). */
	for (i = 3; i < (buf[1] + 2); i++) {
		if (buf[i]) {
			goto mode_sel_param_field_err;
		}
	}

	sret = SBD_SUCCESS;

	/* All good. Lets handle the write cache change, if any */
	/* WCE set => enable write cache, i.e. write-cache-disable = 0. */
	if (buf[2] & BIT_2) {
		sret = sbd_wcd_set(0, sl);
	} else {
		sret = sbd_wcd_set(1, sl);
	}

	if (sret != SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
		return;
	}

	/* set on the device passed, now set the flags */
	mutex_enter(&sl->sl_lock);
	if (buf[2] & BIT_2) {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
	} else {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
	}

	/* Tell every other nexus that the mode parameters changed. */
	for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
		if (it == task->task_lu_itl_handle)
			continue;
		it->sbd_it_ua_conditions |= SBD_UA_MODE_PARAMETERS_CHANGED;
	}

	/* SP bit: also persist the setting so it survives reset/reboot. */
	if (task->task_cdb[1] & 1) {
		if (buf[2] & BIT_2) {
			sl->sl_flags &= ~SL_SAVED_WRITE_CACHE_DISABLE;
		} else {
			sl->sl_flags |= SL_SAVED_WRITE_CACHE_DISABLE;
		}
		mutex_exit(&sl->sl_lock);
		sret = sbd_write_lu_info(sl);
	} else {
		mutex_exit(&sl->sl_lock);
	}
	if (sret == SBD_SUCCESS) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
	} else {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_WRITE_ERROR);
	}
	return;

mode_sel_param_len_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_PARAM_LIST_LENGTH_ERROR);
	return;
mode_sel_param_field_err:
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_INVALID_FIELD_IN_PARAM_LIST);
}
1072 
1073 /*
1074  * This function parse through a string, passed to it as a pointer to a string,
1075  * by adjusting the pointer to the first non-space character and returns
1076  * the count/length of the first bunch of non-space characters. Multiple
1077  * Management URLs are stored as a space delimited string in sl_mgmt_url
1078  * field of sbd_lu_t. This function is used to retrieve one url at a time.
1079  *
1080  * i/p : pointer to pointer to a url string
1081  * o/p : Adjust the pointer to the url to the first non white character
1082  *       and returns the length of the URL
1083  */
1084 uint16_t
1085 sbd_parse_mgmt_url(char **url_addr) {
1086 	uint16_t url_length = 0;
1087 	char *url;
1088 	url = *url_addr;
1089 
1090 	while (*url != '\0') {
1091 		if (*url == ' ' || *url == '\t' || *url == '\n') {
1092 			(*url_addr)++;
1093 			url = *url_addr;
1094 		} else {
1095 			break;
1096 		}
1097 	}
1098 
1099 	while (*url != '\0') {
1100 		if (*url == ' ' || *url == '\t' ||
1101 		    *url == '\n' || *url == '\0') {
1102 			break;
1103 		}
1104 		url++;
1105 		url_length++;
1106 	}
1107 	return (url_length);
1108 }
1109 
/*
 * Handle INQUIRY.  Returns standard inquiry data, or when the EVPD bit
 * is set one of the supported vital product data pages: 0x00 (supported
 * pages), 0x80 (unit serial number), 0x83 (device identification),
 * 0x85 (management network addresses, only if a mgmt URL is set) and
 * 0x86 (extended inquiry data).
 */
void
sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	uint8_t *p;
	uint8_t byte0;
	uint8_t page_length;
	uint16_t bsize = 512;
	uint16_t cmd_size;
	uint32_t xfer_size = 4;
	uint32_t mgmt_url_size = 0;


	byte0 = DTYPE_DIRECT;
	/*
	 * Basic protocol checks.
	 */

	/* Page code (byte 2) must be 0 unless EVPD set; control byte 0. */
	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	/*
	 * Zero byte allocation length is not an error.  Just
	 * return success.
	 */

	cmd_size = (((uint16_t)cdbp[3]) << 8) | cdbp[4];

	if (cmd_size == 0) {
		task->task_cmd_xfer_length = 0;
		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length = 0;
		}
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (sl->sl_mgmt_url) {
		mgmt_url_size = strlen(sl->sl_mgmt_url);
	}

	/*
	 * Standard inquiry
	 */

	if ((cdbp[1] & 1) == 0) {
		int	i;
		struct scsi_inquiry *inq;

		p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);
		inq = (struct scsi_inquiry *)p;

		/* 69 additional bytes => 74-byte standard inquiry data. */
		page_length = 69;
		xfer_size = page_length + 5;

		inq->inq_dtype = DTYPE_DIRECT;
		inq->inq_ansi = 5;	/* SPC-3 */
		inq->inq_hisup = 1;
		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
		inq->inq_len = page_length;

		inq->inq_tpgs = TPGS_FAILOVER_IMPLICIT;
		inq->inq_cmdque = 1;

		/* Vendor/product/revision: per-LU values if set, else
		 * the module-wide defaults. */
		if (sl->sl_flags & SL_VID_VALID) {
			bcopy(sl->sl_vendor_id, inq->inq_vid, 8);
		} else {
			bcopy(sbd_vendor_id, inq->inq_vid, 8);
		}

		if (sl->sl_flags & SL_PID_VALID) {
			bcopy(sl->sl_product_id, inq->inq_pid, 16);
		} else {
			bcopy(sbd_product_id, inq->inq_pid, 16);
		}

		if (sl->sl_flags & SL_REV_VALID) {
			bcopy(sl->sl_revision, inq->inq_revision, 4);
		} else {
			bcopy(sbd_revision, inq->inq_revision, 4);
		}

		/* Adding Version Descriptors */
		i = 0;
		/* SAM-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x00;
		inq->inq_vd[i].inq_vd_lsb = 0x60;
		i++;

		/* transport */
		/* Version descriptor for the transport the port speaks. */
		switch (task->task_lport->lport_id->protocol_id) {
		case PROTOCOL_FIBRE_CHANNEL:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x00;
			i++;
			break;

		case PROTOCOL_PARALLEL_SCSI:
		case PROTOCOL_SSA:
		case PROTOCOL_IEEE_1394:
			/* Currently no claims of conformance */
			break;

		case PROTOCOL_SRP:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x40;
			i++;
			break;

		case PROTOCOL_iSCSI:
			inq->inq_vd[i].inq_vd_msb = 0x09;
			inq->inq_vd[i].inq_vd_lsb = 0x60;
			i++;
			break;

		case PROTOCOL_SAS:
		case PROTOCOL_ADT:
		case PROTOCOL_ATAPI:
		default:
			/* Currently no claims of conformance */
			break;
		}

		/* SPC-3 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x00;
		i++;

		/* SBC-2 no version */
		inq->inq_vd[i].inq_vd_msb = 0x03;
		inq->inq_vd[i].inq_vd_lsb = 0x20;

		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
		    min(cmd_size, xfer_size));
		kmem_free(p, bsize);

		return;
	}

	/*
	 * EVPD handling
	 */

	/* Default 512 bytes may not be enough, increase bsize if necessary */
	if (cdbp[2] == 0x83 || cdbp[2] == 0x85) {
		if (bsize <  cmd_size)
			bsize = cmd_size;
	}
	p = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP);

	switch (cdbp[2]) {
	case 0x00:
		/* Page 0x85 is advertised only when a mgmt URL exists. */
		page_length = 4 + (mgmt_url_size ? 1 : 0);

		p[0] = byte0;
		p[3] = page_length;
		/* Supported VPD pages in ascending order */
		{
			uint8_t i = 5;

			p[i++] = 0x80;
			p[i++] = 0x83;
			if (mgmt_url_size != 0)
				p[i++] = 0x85;
			p[i++] = 0x86;
		}
		xfer_size = page_length + 4;
		break;

	case 0x80:
		if (sl->sl_serial_no_size) {
			page_length = sl->sl_serial_no_size;
			bcopy(sl->sl_serial_no, p + 4, sl->sl_serial_no_size);
		} else {
			/* if no serial num is specified set 4 spaces */
			page_length = 4;
			bcopy("    ", p + 4, 4);
		}
		p[0] = byte0;
		p[1] = 0x80;
		p[3] = page_length;
		xfer_size = page_length + 4;
		break;

	case 0x83:
		/* Device identification page is built by the framework. */
		xfer_size = stmf_scsilib_prepare_vpd_page83(task, p,
		    bsize, byte0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID);
		break;

	case 0x85:
		if (mgmt_url_size == 0) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			kmem_free(p, bsize);
			return;
		}
		{
			uint16_t idx, newidx, sz, url_size;
			char *url;

			p[0] = byte0;
			p[1] = 0x85;

			idx = 4;
			url = sl->sl_mgmt_url;
			url_size = sbd_parse_mgmt_url(&url);
			/* Creating Network Service Descriptors */
			while (url_size != 0) {
				/* Null terminated and 4 Byte aligned */
				sz = url_size + 1;
				sz += (sz % 4) ? 4 - (sz % 4) : 0;
				newidx = idx + sz + 4;

				/* Descriptors that don't fit are skipped but
				 * still counted in the total length. */
				if (newidx < bsize) {
					/*
					 * SPC-3r23 : Table 320  (Sec 7.6.5)
					 * (Network service descriptor format
					 *
					 * Note: Hard coding service type as
					 * "Storage Configuration Service".
					 */
					p[idx] = 1;
					SCSI_WRITE16(p + idx + 2, sz);
					bcopy(url, p + idx + 4, url_size);
					xfer_size = newidx + 4;
				}
				idx = newidx;

				/* skip to next mgmt url if any */
				url += url_size;
				url_size = sbd_parse_mgmt_url(&url);
			}

			/* Total descriptor length */
			SCSI_WRITE16(p + 2, idx - 4);
			break;
		}

	case 0x86:
		page_length = 0x3c;

		p[0] = byte0;
		p[1] = 0x86;		/* Page 86 response */
		p[3] = page_length;

		/*
		 * Bits 0, 1, and 2 will need to be updated
		 * to reflect the queue tag handling if/when
		 * that is implemented.  For now, we're going
		 * to claim support only for Simple TA.
		 */
		p[5] = 1;
		xfer_size = page_length + 4;
		break;

	default:
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		kmem_free(p, bsize);
		return;
	}

	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
	    min(cmd_size, xfer_size));
	kmem_free(p, bsize);
}
1382 
1383 stmf_status_t
1384 sbd_task_alloc(struct scsi_task *task)
1385 {
1386 	if ((task->task_lu_private =
1387 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
1388 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1389 		scmd->flags = 0;
1390 		return (STMF_SUCCESS);
1391 	}
1392 	return (STMF_ALLOC_FAILURE);
1393 }
1394 
/*
 * Tear down an I_T nexus: drop its persistent reservation state,
 * unlink it from the LU's session list and free it.
 */
void
sbd_remove_it_handle(sbd_lu_t *sl, sbd_it_data_t *it)
{
	sbd_it_data_t **ppit;

	sbd_pgr_remove_it_handle(sl, it);
	mutex_enter(&sl->sl_lock);
	/* Pointer-to-pointer walk: unlink without tracking a prev node. */
	for (ppit = &sl->sl_it_list; *ppit != NULL;
	    ppit = &((*ppit)->sbd_it_next)) {
		if ((*ppit) == it) {
			*ppit = it->sbd_it_next;
			break;
		}
	}
	mutex_exit(&sl->sl_lock);

	DTRACE_PROBE2(itl__nexus__end, stmf_lu_t *, sl->sl_lu,
	    sbd_it_data_t *, it);

	kmem_free(it, sizeof (*it));
}
1416 
/*
 * Clear a SCSI-2 reservation on the LU.  With it == NULL the holder is
 * looked up and the reservation always cleared; with a specific nexus,
 * the reservation is cleared only if that nexus holds it (hence
 * "check_and_clear").
 */
void
sbd_check_and_clear_scsi2_reservation(sbd_lu_t *sl, sbd_it_data_t *it)
{
	mutex_enter(&sl->sl_lock);
	if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) == 0) {
		/* If we dont have any reservations, just get out. */
		mutex_exit(&sl->sl_lock);
		return;
	}

	if (it == NULL) {
		/* Find the I_T nexus which is holding the reservation. */
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
				ASSERT(it->sbd_it_session_id ==
				    sl->sl_rs_owner_session_id);
				break;
			}
		}
		/* The LU flag said reserved, so a holder must exist. */
		ASSERT(it != NULL);
	} else {
		/*
		 * We were passed an I_T nexus. If this nexus does not hold
		 * the reservation, do nothing. This is why this function is
		 * called "check_and_clear".
		 */
		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
			mutex_exit(&sl->sl_lock);
			return;
		}
	}
	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
	sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
	mutex_exit(&sl->sl_lock);
}
1452 
1453 
1454 
/*
 * Main new-task entry point for the sbd LU provider.  Sets up per-nexus
 * state on first contact, performs unit attention and reservation
 * conflict checks in the SAM-3 mandated order, then dispatches the CDB
 * to the appropriate command handler.
 */
void
sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
{
	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
	sbd_it_data_t *it;
	uint8_t cdb0, cdb1;

	/* First command on this I_T nexus: allocate and register state. */
	if ((it = task->task_lu_itl_handle) == NULL) {
		mutex_enter(&sl->sl_lock);
		for (it = sl->sl_it_list; it != NULL; it = it->sbd_it_next) {
			/* An entry for this session already exists but the
			 * task has no handle yet; ask initiator to retry. */
			if (it->sbd_it_session_id ==
			    task->task_session->ss_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
				return;
			}
		}
		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
		if (it == NULL) {
			mutex_exit(&sl->sl_lock);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		it->sbd_it_session_id = task->task_session->ss_session_id;
		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
		it->sbd_it_next = sl->sl_it_list;
		sl->sl_it_list = it;
		mutex_exit(&sl->sl_lock);

		DTRACE_PROBE1(itl__nexus__start, scsi_task *, task);

		sbd_pgr_initialize_it(task);
		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
		    task->task_session, it->sbd_it_session_id, it)
		    != STMF_SUCCESS) {
			sbd_remove_it_handle(sl, it);
			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
			return;
		}
		task->task_lu_itl_handle = it;
		/* New nexus starts with a power-on/reset unit attention. */
		it->sbd_it_ua_conditions = SBD_UA_POR;
	} else if (it->sbd_it_flags & SBD_IT_PGR_CHECK_FLAG) {
		/* PGR state changed elsewhere; refresh it for this nexus. */
		sbd_pgr_initialize_it(task);
		mutex_enter(&sl->sl_lock);
		it->sbd_it_flags &= ~SBD_IT_PGR_CHECK_FLAG;
		mutex_exit(&sl->sl_lock);
	}

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	/* Checking ua conditions as per SAM3R14 5.3.2 specified order */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
			saa = STMF_SAA_POR;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Reservation conflict checks */
	if (SBD_PGR_RSVD(sl->sl_pgr)) {
		if (sbd_pgr_reservation_conflict(task)) {
			stmf_scsilib_send_status(task,
			    STATUS_RESERVATION_CONFLICT, 0);
			return;
		}
	} else if ((sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) &&
	    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
		/* SCSI-2 reserved by another nexus: only a few commands
		 * are allowed through (see SCSI2_CONFLICT_FREE_CMDS). */
		if (!(SCSI2_CONFLICT_FREE_CMDS(task->task_cdb))) {
			stmf_scsilib_send_status(task,
			    STATUS_RESERVATION_CONFLICT, 0);
			return;
		}
	}

	/* Rest of the ua condition checks */
	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
		uint32_t saa = 0;

		mutex_enter(&sl->sl_lock);
		if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
			/* READ CAPACITY itself is not failed by this UA. */
			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
			    (task->task_cdb[1] ==
			    SSVC_ACTION_READ_CAPACITY_G4))) {
				saa = 0;
			} else {
				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
			}
		} else if (it->sbd_it_ua_conditions &
		    SBD_UA_MODE_PARAMETERS_CHANGED) {
			it->sbd_it_ua_conditions &=
			    ~SBD_UA_MODE_PARAMETERS_CHANGED;
			saa = STMF_SAA_MODE_PARAMETERS_CHANGED;
		} else {
			it->sbd_it_ua_conditions = 0;
			saa = 0;
		}
		mutex_exit(&sl->sl_lock);
		if (saa) {
			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
			return;
		}
	}

	/* Low 5 bits identify READ/WRITE across all CDB size groups. */
	cdb0 = task->task_cdb[0] & 0x1F;

	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
			return;
		}
		if (cdb0 == SCMD_READ) {
			sbd_handle_read(task, initial_dbuf);
			return;
		}
		sbd_handle_write(task, initial_dbuf);
		return;
	}

	cdb0 = task->task_cdb[0];
	cdb1 = task->task_cdb[1];

	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
		sbd_handle_inquiry(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_OUT) {
		sbd_handle_pgr_out_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0  == SCMD_PERSISTENT_RESERVE_IN) {
		sbd_handle_pgr_in_cmd(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_RELEASE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner don't release it, just return good */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task, STATUS_GOOD, 0);
				return;
			}
		}
		sl->sl_flags &= ~SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_RESERVE) {
		if (cdb1) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		mutex_enter(&sl->sl_lock);
		if (sl->sl_flags & SL_LU_HAS_SCSI2_RESERVATION) {
			/* If not owner, return conflict status */
			if (it->sbd_it_session_id !=
			    sl->sl_rs_owner_session_id) {
				mutex_exit(&sl->sl_lock);
				stmf_scsilib_send_status(task,
				    STATUS_RESERVATION_CONFLICT, 0);
				return;
			}
		}
		sl->sl_flags |= SL_LU_HAS_SCSI2_RESERVATION;
		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
		sl->sl_rs_owner_session_id = it->sbd_it_session_id;
		mutex_exit(&sl->sl_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_REQUEST_SENSE) {
		/*
		 * LU provider needs to store unretrieved sense data
		 * (e.g. after power-on/reset).  For now, we'll just
		 * return good status with no sense.
		 */

		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
		    task->task_cdb[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}

		return;
	}

	/* Report Target Port Groups */
	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
	    ((cdb1 & 0x1F) == 0x0A)) {
		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
		task->task_cmd_xfer_length = 0;
		if (task->task_cdb[4] & 0xFC) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}
		/* LOEJ (eject) is not supported on a logical unit. */
		if (task->task_cdb[4] & 2) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
		} else {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		}
		return;

	}

	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
		uint8_t *p;
		p = kmem_zalloc(512, KM_SLEEP);
		sbd_handle_mode_sense(task, initial_dbuf, p);
		kmem_free(p, 512);
		return;
	}

	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
		sbd_handle_mode_select(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
		sbd_handle_read_capacity(task, initial_dbuf);
		return;
	}

	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
			sbd_handle_read_capacity(task, initial_dbuf);
			return;
		/*
		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
		 * 	sbd_handle_read(task, initial_dbuf);
		 * 	return;
		 */
		}
	}

	/*
	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
	 *		 sbd_handle_write(task, initial_dbuf);
	 * 		return;
	 *	}
	 * }
	 */

	if (cdb0 == SCMD_VERIFY) {
		/*
		 * Something more likely needs to be done here.
		 */
		task->task_cmd_xfer_length = 0;
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
		sbd_handle_sync_cache(task, initial_dbuf);
		return;
	}

	/* Anything not handled above is an unsupported opcode. */
	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
1758 
1759 void
1760 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1761 {
1762 	sbd_cmd_t *scmd = NULL;
1763 
1764 	scmd = (sbd_cmd_t *)task->task_lu_private;
1765 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1766 		return;
1767 
1768 	switch (scmd->cmd_type) {
1769 	case (SBD_CMD_SCSI_READ):
1770 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1771 		break;
1772 
1773 	case (SBD_CMD_SCSI_WRITE):
1774 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1775 		break;
1776 
1777 	case (SBD_CMD_SMALL_READ):
1778 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1779 		break;
1780 
1781 	case (SBD_CMD_SMALL_WRITE):
1782 		sbd_handle_short_write_xfer_completion(task, dbuf);
1783 		break;
1784 
1785 	default:
1786 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1787 		break;
1788 	}
1789 }
1790 
/*
 * sbd never requests asynchronous status completion callbacks, so STMF
 * should never invoke this entry point; a call indicates a logic error.
 */
/* ARGSUSED */
void
sbd_send_status_done(struct scsi_task *task)
{
	cmn_err(CE_PANIC,
	    "sbd_send_status_done: this should not have been called");
}
1798 
1799 void
1800 sbd_task_free(struct scsi_task *task)
1801 {
1802 	if (task->task_lu_private) {
1803 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1804 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1805 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1806 			    (void *)task);
1807 		}
1808 		kmem_free(scmd, sizeof (sbd_cmd_t));
1809 	}
1810 }
1811 
1812 /*
1813  * Aborts are synchronus w.r.t. I/O AND
1814  * All the I/O which SBD does is synchronous AND
1815  * Everything within a task is single threaded.
1816  *   IT MEANS
1817  * If this function is called, we are doing nothing with this task
1818  * inside of sbd module.
1819  */
1820 /* ARGSUSED */
1821 stmf_status_t
1822 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1823 {
1824 	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
1825 	scsi_task_t *task;
1826 
1827 	if (abort_cmd == STMF_LU_RESET_STATE) {
1828 		return (sbd_lu_reset_state(lu));
1829 	}
1830 
1831 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1832 		sbd_check_and_clear_scsi2_reservation(sl, (sbd_it_data_t *)arg);
1833 		sbd_remove_it_handle(sl, (sbd_it_data_t *)arg);
1834 		return (STMF_SUCCESS);
1835 	}
1836 
1837 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1838 	task = (scsi_task_t *)arg;
1839 	if (task->task_lu_private) {
1840 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1841 
1842 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1843 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1844 			return (STMF_ABORT_SUCCESS);
1845 		}
1846 	}
1847 
1848 	return (STMF_NOT_FOUND);
1849 }
1850 
/*
 * STMF control entry point: handles LU online/offline requests and the
 * framework's acknowledgements of their completion.
 */
/* ARGSUSED */
void
sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;
	stmf_change_status_t st;

	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
	    (cmd == STMF_CMD_LU_OFFLINE) ||
	    (cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
	    (cmd == STMF_ACK_LU_OFFLINE_COMPLETE));

	st.st_completion_status = STMF_SUCCESS;
	st.st_additional_info = NULL;

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		/* Only an OFFLINE LU may go online. */
		if (sl->sl_state == STMF_STATE_ONLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (sl->sl_state != STMF_STATE_OFFLINE)
			st.st_completion_status = STMF_FAILURE;
		if (st.st_completion_status == STMF_SUCCESS) {
			sl->sl_state = STMF_STATE_ONLINE;
			sl->sl_state_not_acked = 1;
		}
		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
		break;

	case STMF_CMD_LU_OFFLINE:
		if (sl->sl_state == STMF_STATE_OFFLINE)
			st.st_completion_status = STMF_ALREADY;
		else if (sl->sl_state != STMF_STATE_ONLINE)
			st.st_completion_status = STMF_FAILURE;
		if (st.st_completion_status == STMF_SUCCESS) {
			/* Going offline forfeits the SCSI-2 reservation
			 * and any medium-removal-prevention state. */
			sl->sl_flags &= ~(SL_MEDIUM_REMOVAL_PREVENTED |
			    SL_LU_HAS_SCSI2_RESERVATION);
			sl->sl_state = STMF_STATE_OFFLINE;
			sl->sl_state_not_acked = 1;
		}
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
		break;

	case STMF_ACK_LU_ONLINE_COMPLETE:
		/* Fallthrough */
	case STMF_ACK_LU_OFFLINE_COMPLETE:
		sl->sl_state_not_acked = 0;
		break;

	}
}
1901 
/*
 * STMF info entry point; sbd exports no additional LU information.
 */
/* ARGSUSED */
stmf_status_t
sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
1909 
/*
 * Handle an LU reset: restore the write-cache-disable setting from its
 * saved (persistent) value, clear any SCSI-2 reservation, and
 * deregister all ITL handles for this LU.
 */
stmf_status_t
sbd_lu_reset_state(stmf_lu_t *lu)
{
	sbd_lu_t *sl = (sbd_lu_t *)lu->lu_provider_private;

	mutex_enter(&sl->sl_lock);
	/* Drop the lock before touching the device via sbd_wcd_set(). */
	if (sl->sl_flags & SL_SAVED_WRITE_CACHE_DISABLE) {
		sl->sl_flags |= SL_WRITEBACK_CACHE_DISABLE;
		mutex_exit(&sl->sl_lock);
		(void) sbd_wcd_set(1, sl);
	} else {
		sl->sl_flags &= ~SL_WRITEBACK_CACHE_DISABLE;
		mutex_exit(&sl->sl_lock);
		(void) sbd_wcd_set(0, sl);
	}
	sbd_check_and_clear_scsi2_reservation(sl, NULL);
	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
		return (STMF_FAILURE);
	}
	return (STMF_SUCCESS);
}
1931 
1932 sbd_status_t
1933 sbd_flush_data_cache(sbd_lu_t *sl, int fsync_done)
1934 {
1935 	int r = 0;
1936 	int ret;
1937 
1938 	if (fsync_done)
1939 		goto over_fsync;
1940 	if ((sl->sl_data_vtype == VREG) || (sl->sl_data_vtype == VBLK)) {
1941 		if (VOP_FSYNC(sl->sl_data_vp, FSYNC, kcred, NULL))
1942 			return (SBD_FAILURE);
1943 	}
1944 over_fsync:
1945 	if (((sl->sl_data_vtype == VCHR) || (sl->sl_data_vtype == VBLK)) &&
1946 	    ((sl->sl_flags & SL_NO_DATA_DKIOFLUSH) == 0)) {
1947 		ret = VOP_IOCTL(sl->sl_data_vp, DKIOCFLUSHWRITECACHE, NULL,
1948 		    FKIOCTL, kcred, &r, NULL);
1949 		if ((ret == ENOTTY) || (ret == ENOTSUP)) {
1950 			mutex_enter(&sl->sl_lock);
1951 			sl->sl_flags |= SL_NO_DATA_DKIOFLUSH;
1952 			mutex_exit(&sl->sl_lock);
1953 		} else if (ret != 0) {
1954 			return (SBD_FAILURE);
1955 		}
1956 	}
1957 
1958 	return (SBD_SUCCESS);
1959 }
1960 
1961 /* ARGSUSED */
1962 static void
1963 sbd_handle_sync_cache(struct scsi_task *task,
1964     struct stmf_data_buf *initial_dbuf)
1965 {
1966 	sbd_lu_t *sl = (sbd_lu_t *)task->task_lu->lu_provider_private;
1967 	uint64_t	lba, laddr;
1968 	sbd_status_t	sret;
1969 	uint32_t	len;
1970 	int		is_g4 = 0;
1971 	int		immed;
1972 
1973 	task->task_cmd_xfer_length = 0;
1974 	/*
1975 	 * Determine if this is a 10 or 16 byte CDB
1976 	 */
1977 
1978 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
1979 		is_g4 = 1;
1980 
1981 	/*
1982 	 * Determine other requested parameters
1983 	 *
1984 	 * We don't have a non-volatile cache, so don't care about SYNC_NV.
1985 	 * Do not support the IMMED bit.
1986 	 */
1987 
1988 	immed = (task->task_cdb[1] & 0x02);
1989 
1990 	if (immed) {
1991 		stmf_scsilib_send_status(task, STATUS_CHECK,
1992 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1993 		return;
1994 	}
1995 
1996 	/*
1997 	 * Check to be sure we're not being asked to sync an LBA
1998 	 * that is out of range.  While checking, verify reserved fields.
1999 	 */
2000 
2001 	if (is_g4) {
2002 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
2003 		    task->task_cdb[15]) {
2004 			stmf_scsilib_send_status(task, STATUS_CHECK,
2005 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2006 			return;
2007 		}
2008 
2009 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
2010 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
2011 	} else {
2012 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
2013 		    task->task_cdb[9]) {
2014 			stmf_scsilib_send_status(task, STATUS_CHECK,
2015 			    STMF_SAA_INVALID_FIELD_IN_CDB);
2016 			return;
2017 		}
2018 
2019 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
2020 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
2021 	}
2022 
2023 	laddr = lba << sl->sl_data_blocksize_shift;
2024 	len <<= sl->sl_data_blocksize_shift;
2025 
2026 	if ((laddr + (uint64_t)len) > sl->sl_lu_size) {
2027 		stmf_scsilib_send_status(task, STATUS_CHECK,
2028 		    STMF_SAA_LBA_OUT_OF_RANGE);
2029 		return;
2030 	}
2031 
2032 	sret = sbd_flush_data_cache(sl, 0);
2033 	if (sret != SBD_SUCCESS) {
2034 		stmf_scsilib_send_status(task, STATUS_CHECK,
2035 		    STMF_SAA_WRITE_ERROR);
2036 		return;
2037 	}
2038 
2039 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
2040 }
2041