xref: /illumos-gate/usr/src/uts/common/io/comstar/lu/stmf_sbd/sbd_scsi.c (revision 257873cfc1dd3337766407f80397db60a56f2f5a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/conf.h>
27 #include <sys/file.h>
28 #include <sys/ddi.h>
29 #include <sys/sunddi.h>
30 #include <sys/modctl.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/scsi/impl/scsi_reset_notify.h>
33 #include <sys/scsi/generic/mode.h>
34 #include <sys/disp.h>
35 #include <sys/byteorder.h>
36 #include <sys/atomic.h>
37 
38 #include <stmf.h>
39 #include <lpif.h>
40 #include <portif.h>
41 #include <stmf_ioctl.h>
42 #include <stmf_sbd.h>
43 #include <sbd_impl.h>
44 
45 stmf_status_t sbd_lu_reset_state(stmf_lu_t *lu);
46 static void sbd_handle_sync_cache(struct scsi_task *task,
47     struct stmf_data_buf *initial_dbuf);
48 void sbd_handle_read_xfer_completion(struct scsi_task *task,
49     sbd_cmd_t *scmd, struct stmf_data_buf *dbuf);
50 
51 /*
52  * IMPORTANT NOTE:
53  * =================
54  * The whole world here is based on the assumption that everything within
55  * a SCSI task executes in a single-threaded manner, even the aborts.
56  * Don't ever change that.  There won't be any performance gain, but there
57  * will be tons of race conditions.
58  */
59 
60 void
61 sbd_do_read_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
62 					struct stmf_data_buf *dbuf)
63 {
64 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
65 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
66 	uint64_t laddr;
67 	uint32_t len, buflen, iolen;
68 	int ndx;
69 	int bufs_to_take;
70 
71 	/* Let's try not to hog all the buffers the port has. */
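	/*
	 * Small transfers (< 32K) are capped at two outstanding buffers;
	 * everything else may use up to the port's task_max_nbufs.
	 */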
72 	bufs_to_take = ((task->task_max_nbufs > 2) &&
73 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
74 	    task->task_max_nbufs;
75 
76 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
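	/*
	 * Byte offset into the backing store: the command's starting address,
	 * plus the running relative offset, plus the offset at which the LU
	 * data begins (sli_lu_data_offset).
	 */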
77 	laddr = scmd->addr + scmd->current_ro + slu->sl_sli->sli_lu_data_offset;
78 
79 	for (buflen = 0, ndx = 0; (buflen < len) &&
80 	    (ndx < dbuf->db_sglist_length); ndx++) {
81 		iolen = min(len - buflen, dbuf->db_sglist[ndx].seg_length);
82 		if (iolen == 0)
83 			break;
84 		if (sst->sst_data_read(sst, laddr, (uint64_t)iolen,
85 		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
86 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
87 			/* No need to do any more xfers; just complete it. */
88 			dbuf->db_data_size = 0;
89 			dbuf->db_xfer_status = STMF_SUCCESS;
90 			sbd_handle_read_xfer_completion(task, scmd, dbuf);
91 			return;
92 		}
93 		buflen += iolen;
94 		laddr += (uint64_t)iolen;
95 	}
96 	dbuf->db_relative_offset = scmd->current_ro;
97 	dbuf->db_data_size = buflen;
98 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
99 	(void) stmf_xfer_data(task, dbuf, 0);
100 	scmd->len -= buflen;
101 	scmd->current_ro += buflen;
102 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
103 		uint32_t maxsize, minsize, old_minsize;
104 
105 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
106 		minsize = maxsize >> 2;
107 		do {
108 			/*
109 			 * A bad port implementation can keep on failing
110 			 * the request but keep on sending us a false
111 			 * minsize.
112 			 */
113 			old_minsize = minsize;
114 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
115 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
116 		    (minsize >= 512));
117 		if (dbuf == NULL) {
118 			return;
119 		}
120 		scmd->nbufs++;
121 		sbd_do_read_xfer(task, scmd, dbuf);
122 	}
123 }
124 
125 void
126 sbd_handle_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
127 				struct stmf_data_buf *dbuf)
128 {
129 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
130 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
131 		    dbuf->db_xfer_status, NULL);
132 		return;
133 	}
134 	task->task_nbytes_transferred += dbuf->db_data_size;
135 	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
136 		stmf_free_dbuf(task, dbuf);
137 		scmd->nbufs--;
138 		if (scmd->nbufs)
139 			return;	/* wait for all buffers to complete */
140 		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
141 		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
142 			stmf_scsilib_send_status(task, STATUS_CHECK,
143 			    STMF_SAA_READ_ERROR);
144 		else
145 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
146 		return;
147 	}
148 	sbd_do_read_xfer(task, scmd, dbuf);
149 }
150 
151 void
152 sbd_handle_read(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
153 {
154 	uint64_t lba, laddr;
155 	uint32_t len;
156 	uint8_t op = task->task_cdb[0];
157 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
158 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
159 	sbd_cmd_t *scmd;
160 	stmf_data_buf_t *dbuf;
161 	int fast_path;
162 
163 	if (op == SCMD_READ) {
164 		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
165 		len = (uint32_t)task->task_cdb[4];
166 
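		/* For READ(6), a transfer length of zero means 256 blocks. */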
167 		if (len == 0) {
168 			len = 256;
169 		}
170 	} else if (op == SCMD_READ_G1) {
171 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
172 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
173 	} else if (op == SCMD_READ_G5) {
174 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
175 		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
176 	} else if (op == SCMD_READ_G4) {
177 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
178 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
179 	} else {
180 		stmf_scsilib_send_status(task, STATUS_CHECK,
181 		    STMF_SAA_INVALID_OPCODE);
182 		return;
183 	}
184 
185 	laddr = lba << slu->sl_shift_count;
186 	len <<= slu->sl_shift_count;
187 
188 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
189 		stmf_scsilib_send_status(task, STATUS_CHECK,
190 		    STMF_SAA_LBA_OUT_OF_RANGE);
191 		return;
192 	}
193 
194 	task->task_cmd_xfer_length = len;
195 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
196 		task->task_expected_xfer_length = len;
197 	}
198 
199 	if (len != task->task_expected_xfer_length) {
200 		fast_path = 0;
201 		len = (len > task->task_expected_xfer_length) ?
202 			task->task_expected_xfer_length : len;
203 	} else {
204 		fast_path = 1;
205 	}
206 
207 	if (len == 0) {
208 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
209 		return;
210 	}
211 
212 	if (initial_dbuf == NULL) {
213 		uint32_t maxsize, minsize, old_minsize;
214 
215 		maxsize = (len > (128*1024)) ? 128*1024 : len;
216 		minsize = maxsize >> 2;
217 		do {
218 			old_minsize = minsize;
219 			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
220 			    &minsize, 0);
221 		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
222 		    (minsize >= 512));
223 		if (initial_dbuf == NULL) {
224 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
225 			    STMF_ALLOC_FAILURE, NULL);
226 			return;
227 		}
228 	}
229 	dbuf = initial_dbuf;
230 
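	/*
	 * Fast path: if the initiator expects exactly this much data and the
	 * dbuf holds it all in a single segment, do one read, one transfer,
	 * and let the port piggy-back the status.
	 */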
231 	if ((dbuf->db_buf_size >= len) && fast_path &&
232 	    (dbuf->db_sglist_length == 1)) {
233 		if (sst->sst_data_read(sst,
234 		    laddr + slu->sl_sli->sli_lu_data_offset, (uint64_t)len,
235 		    dbuf->db_sglist[0].seg_addr) == STMF_SUCCESS) {
236 			dbuf->db_relative_offset = 0;
237 			dbuf->db_data_size = len;
238 			dbuf->db_flags = DB_SEND_STATUS_GOOD |
239 			    DB_DIRECTION_TO_RPORT;
240 			(void) stmf_xfer_data(task, dbuf, STMF_IOF_LU_DONE);
241 		} else {
242 			stmf_scsilib_send_status(task, STATUS_CHECK,
243 			    STMF_SAA_READ_ERROR);
244 		}
245 		return;
246 	}
247 
248 	if (task->task_lu_private) {
249 		scmd = (sbd_cmd_t *)task->task_lu_private;
250 	} else {
251 		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
252 		task->task_lu_private = scmd;
253 	}
254 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
255 	scmd->cmd_type = SBD_CMD_SCSI_READ;
256 	scmd->nbufs = 1;
257 	scmd->addr = laddr;
258 	scmd->len = len;
259 	scmd->current_ro = 0;
260 
261 	sbd_do_read_xfer(task, scmd, dbuf);
262 }
263 
264 void
265 sbd_do_write_xfer(struct scsi_task *task, sbd_cmd_t *scmd,
266 					struct stmf_data_buf *dbuf)
267 {
268 	uint32_t len;
269 	int bufs_to_take;
270 
271 	/* Let's try not to hog all the buffers the port has. */
272 	bufs_to_take = ((task->task_max_nbufs > 2) &&
273 	    (task->task_cmd_xfer_length < (32 * 1024))) ? 2 :
274 	    task->task_max_nbufs;
275 
276 	len = scmd->len > dbuf->db_buf_size ? dbuf->db_buf_size : scmd->len;
277 
278 	dbuf->db_relative_offset = scmd->current_ro;
279 	dbuf->db_data_size = len;
280 	dbuf->db_flags = DB_DIRECTION_FROM_RPORT;
281 	(void) stmf_xfer_data(task, dbuf, 0);
282 	scmd->len -= len;
283 	scmd->current_ro += len;
284 	if (scmd->len && (scmd->nbufs < bufs_to_take)) {
285 		uint32_t maxsize, minsize, old_minsize;
286 
287 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
288 		minsize = maxsize >> 2;
289 		do {
290 			old_minsize = minsize;
291 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
292 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
293 		    (minsize >= 512));
294 		if (dbuf == NULL) {
295 			return;
296 		}
297 		scmd->nbufs++;
298 		sbd_do_write_xfer(task, scmd, dbuf);
299 	}
300 }
301 
302 void
303 sbd_handle_write_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
304     struct stmf_data_buf *dbuf, uint8_t dbuf_reusable)
305 {
306 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
307 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
308 	uint64_t laddr;
309 	uint32_t buflen, iolen;
310 	int ndx;
311 
312 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
313 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
314 		    dbuf->db_xfer_status, NULL);
315 		return;
316 	}
317 
318 	if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
319 		goto WRITE_XFER_DONE;
320 	}
321 
322 	laddr = scmd->addr + dbuf->db_relative_offset +
323 				slu->sl_sli->sli_lu_data_offset;
324 
325 	for (buflen = 0, ndx = 0; (buflen < dbuf->db_data_size) &&
326 	    (ndx < dbuf->db_sglist_length); ndx++) {
327 		iolen = min(dbuf->db_data_size - buflen,
328 					dbuf->db_sglist[ndx].seg_length);
329 		if (iolen == 0)
330 			break;
331 		if (sst->sst_data_write(sst, laddr, (uint64_t)iolen,
332 		    dbuf->db_sglist[ndx].seg_addr) != STMF_SUCCESS) {
333 			scmd->flags |= SBD_SCSI_CMD_XFER_FAIL;
334 			break;
335 		}
336 		buflen += iolen;
337 		laddr += (uint64_t)iolen;
338 	}
339 	task->task_nbytes_transferred += buflen;
340 WRITE_XFER_DONE:
341 	if (scmd->len == 0 || scmd->flags & SBD_SCSI_CMD_XFER_FAIL) {
342 		stmf_free_dbuf(task, dbuf);
343 		scmd->nbufs--;
344 		if (scmd->nbufs)
345 			return;	/* wait for all buffers to complete */
346 		scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
347 		if (scmd->flags & SBD_SCSI_CMD_XFER_FAIL)
348 			stmf_scsilib_send_status(task, STATUS_CHECK,
349 			    STMF_SAA_WRITE_ERROR);
350 		else
351 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
352 		return;
353 	}
354 	if (dbuf_reusable == 0) {
355 		uint32_t maxsize, minsize, old_minsize;
356 		/* free current dbuf and allocate a new one */
357 		stmf_free_dbuf(task, dbuf);
358 
359 		maxsize = (scmd->len > (128*1024)) ? 128*1024 : scmd->len;
360 		minsize = maxsize >> 2;
361 		do {
362 			old_minsize = minsize;
363 			dbuf = stmf_alloc_dbuf(task, maxsize, &minsize, 0);
364 		} while ((dbuf == NULL) && (old_minsize > minsize) &&
365 		    (minsize >= 512));
366 		if (dbuf == NULL) {
367 			scmd->nbufs--;
368 			if (scmd->nbufs == 0) {
369 				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
370 				    STMF_ALLOC_FAILURE, NULL);
371 			}
372 			return;
373 		}
374 	}
375 	sbd_do_write_xfer(task, scmd, dbuf);
376 }
377 
378 void
379 sbd_handle_write(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
380 {
381 	uint64_t lba, laddr;
382 	uint32_t len;
383 	uint8_t op = task->task_cdb[0], do_immediate_data = 0;
384 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
385 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
386 	sbd_cmd_t *scmd;
387 	stmf_data_buf_t *dbuf;
388 
389 	if (op == SCMD_WRITE) {
390 		lba = READ_SCSI21(&task->task_cdb[1], uint64_t);
391 		len = (uint32_t)task->task_cdb[4];
392 
393 		if (len == 0) {
394 			len = 256;
395 		}
396 	} else if (op == SCMD_WRITE_G1) {
397 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
398 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
399 	} else if (op == SCMD_WRITE_G5) {
400 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
401 		len = READ_SCSI32(&task->task_cdb[6], uint32_t);
402 	} else if (op == SCMD_WRITE_G4) {
403 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
404 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
405 	} else {
406 		stmf_scsilib_send_status(task, STATUS_CHECK,
407 		    STMF_SAA_INVALID_OPCODE);
408 		return;
409 	}
410 
411 	laddr = lba << slu->sl_shift_count;
412 	len <<= slu->sl_shift_count;
413 
414 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
415 		stmf_scsilib_send_status(task, STATUS_CHECK,
416 		    STMF_SAA_LBA_OUT_OF_RANGE);
417 		return;
418 	}
419 
420 	task->task_cmd_xfer_length = len;
421 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
422 		task->task_expected_xfer_length = len;
423 	}
424 
425 	len = (len > task->task_expected_xfer_length) ?
426 	    task->task_expected_xfer_length : len;
427 
428 	if (len == 0) {
429 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
430 		return;
431 	}
432 
433 	if (initial_dbuf == NULL) {
434 		uint32_t maxsize, minsize, old_minsize;
435 
436 		maxsize = (len > (128*1024)) ? 128*1024 : len;
437 		minsize = maxsize >> 2;
438 		do {
439 			old_minsize = minsize;
440 			initial_dbuf = stmf_alloc_dbuf(task, maxsize,
441 			    &minsize, 0);
442 		} while ((initial_dbuf == NULL) && (old_minsize > minsize) &&
443 		    (minsize >= 512));
444 		if (initial_dbuf == NULL) {
445 			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
446 			    STMF_ALLOC_FAILURE, NULL);
447 			return;
448 		}
449 	} else if (task->task_flags & TF_INITIAL_BURST) {
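		/*
		 * The port delivered immediate (first-burst) data with the
		 * command.  Clamp it to the command length and treat it below
		 * as a transfer that has already completed.
		 */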
450 		if (initial_dbuf->db_data_size > len) {
451 			if (initial_dbuf->db_data_size >
452 			    task->task_expected_xfer_length) {
453 				/* protocol error */
454 				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
455 				    STMF_INVALID_ARG, NULL);
456 				return;
457 			}
458 			initial_dbuf->db_data_size = len;
459 		}
460 		do_immediate_data = 1;
461 	}
462 	dbuf = initial_dbuf;
463 
464 	if (task->task_lu_private) {
465 		scmd = (sbd_cmd_t *)task->task_lu_private;
466 	} else {
467 		scmd = (sbd_cmd_t *)kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
468 		task->task_lu_private = scmd;
469 	}
470 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
471 	scmd->cmd_type = SBD_CMD_SCSI_WRITE;
472 	scmd->nbufs = 1;
473 	scmd->addr = laddr;
474 	scmd->len = len;
475 	scmd->current_ro = 0;
476 
477 	if (do_immediate_data) {
478 		scmd->len -= dbuf->db_data_size;
479 		scmd->current_ro += dbuf->db_data_size;
480 		dbuf->db_xfer_status = STMF_SUCCESS;
481 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 0);
482 	} else {
483 		sbd_do_write_xfer(task, scmd, dbuf);
484 	}
485 }
486 
487 /*
488  * Utility routine to handle small, non-performance-critical data transfers
489  * to the initiators.  dbuf is an initial data buf (if any), 'p' points to
490  * the data buffer that is the source of the transfer, cdb_xfer_size is the
491  * transfer size taken from the CDB, and cmd_xfer_size is the actual amount
492  * of data this command would transfer (the size of data pointed to by 'p').
493  */
494 void
495 sbd_handle_short_read_transfers(scsi_task_t *task, stmf_data_buf_t *dbuf,
496     uint8_t *p, uint32_t cdb_xfer_size, uint32_t cmd_xfer_size)
497 {
498 	uint32_t bufsize, ndx;
499 	sbd_cmd_t *scmd;
500 
501 	cmd_xfer_size = min(cmd_xfer_size, cdb_xfer_size);
502 
503 	task->task_cmd_xfer_length = cmd_xfer_size;
504 	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
505 		task->task_expected_xfer_length = cmd_xfer_size;
506 	} else {
507 		cmd_xfer_size = min(cmd_xfer_size,
508 		    task->task_expected_xfer_length);
509 	}
510 
511 	if (cmd_xfer_size == 0) {
512 		stmf_scsilib_send_status(task, STATUS_CHECK,
513 		    STMF_SAA_INVALID_FIELD_IN_CDB);
514 		return;
515 	}
516 	if (dbuf == NULL) {
517 		uint32_t minsize = cmd_xfer_size;
518 
519 		dbuf = stmf_alloc_dbuf(task, cmd_xfer_size, &minsize, 0);
520 	}
521 	if (dbuf == NULL) {
522 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
523 		    STMF_ALLOC_FAILURE, NULL);
524 		return;
525 	}
526 
527 	for (bufsize = 0, ndx = 0; bufsize < cmd_xfer_size; ndx++) {
528 		uint8_t *d;
529 		uint32_t s;
530 
531 		d = dbuf->db_sglist[ndx].seg_addr;
532 		s = min((cmd_xfer_size - bufsize),
533 		    dbuf->db_sglist[ndx].seg_length);
534 		bcopy(p+bufsize, d, s);
535 		bufsize += s;
536 	}
537 	dbuf->db_relative_offset = 0;
538 	dbuf->db_data_size = cmd_xfer_size;
539 	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
540 
541 	if (task->task_lu_private == NULL) {
542 		task->task_lu_private =
543 		    kmem_alloc(sizeof (sbd_cmd_t), KM_SLEEP);
544 	}
545 	scmd = (sbd_cmd_t *)task->task_lu_private;
546 
547 	scmd->cmd_type = SBD_CMD_SMALL_READ;
548 	scmd->flags = SBD_SCSI_CMD_ACTIVE;
549 	(void) stmf_xfer_data(task, dbuf, 0);
550 }
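
/*
 * A minimal usage sketch (modeled on sbd_handle_read_capacity() below): the
 * caller builds the payload in a local buffer and lets this routine clamp the
 * transfer against both the CDB allocation length and the initiator's
 * expected transfer length:
 *
 *	uint8_t p[8];
 *
 *	bzero(p, sizeof (p));
 *	... fill in the 8 bytes of READ CAPACITY(10) data ...
 *	sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
 */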
551 
552 void
553 sbd_handle_short_read_xfer_completion(struct scsi_task *task, sbd_cmd_t *scmd,
554 				struct stmf_data_buf *dbuf)
555 {
556 	if (dbuf->db_xfer_status != STMF_SUCCESS) {
557 		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
558 		    dbuf->db_xfer_status, NULL);
559 		return;
560 	}
561 	task->task_nbytes_transferred = dbuf->db_data_size;
562 	scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
563 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
564 }
565 
566 void
567 sbd_handle_read_capacity(struct scsi_task *task,
568     struct stmf_data_buf *initial_dbuf)
569 {
570 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
571 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
572 	sbd_lu_info_t *sli = slu->sl_sli;
573 	uint32_t cdb_len;
574 	uint8_t p[32];
575 	uint64_t s;
576 
577 	s = sli->sli_lu_data_size >> slu->sl_shift_count;
578 	s--;
579 	switch (task->task_cdb[0]) {
580 	case SCMD_READ_CAPACITY:
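		/*
		 * READ CAPACITY(10): if the last LBA does not fit in 32 bits,
		 * return 0xFFFFFFFF so the initiator knows to issue
		 * READ CAPACITY(16) instead.
		 */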
581 		if (s & 0xffffffff00000000ull) {
582 			p[0] = p[1] = p[2] = p[3] = 0xFF;
583 		} else {
584 			p[0] = (s >> 24) & 0xff;
585 			p[1] = (s >> 16) & 0xff;
586 			p[2] = (s >> 8) & 0xff;
587 			p[3] = s & 0xff;
588 		}
589 		p[4] = 0; p[5] = 0;
590 		p[6] = (sli->sli_blocksize >> 8) & 0xff;
591 		p[7] = sli->sli_blocksize & 0xff;
592 		sbd_handle_short_read_transfers(task, initial_dbuf, p, 8, 8);
593 		return;
594 
595 	case SCMD_SVC_ACTION_IN_G4:
596 		cdb_len = READ_SCSI32(&task->task_cdb[10], uint32_t);
597 		bzero(p, 32);
598 		p[0] = (s >> 56) & 0xff;
599 		p[1] = (s >> 48) & 0xff;
600 		p[2] = (s >> 40) & 0xff;
601 		p[3] = (s >> 32) & 0xff;
602 		p[4] = (s >> 24) & 0xff;
603 		p[5] = (s >> 16) & 0xff;
604 		p[6] = (s >> 8) & 0xff;
605 		p[7] = s & 0xff;
606 		p[10] = (sli->sli_blocksize >> 8) & 0xff;
607 		p[11] = sli->sli_blocksize & 0xff;
608 		sbd_handle_short_read_transfers(task, initial_dbuf, p,
609 		    cdb_len, 32);
610 		return;
611 	}
612 }
613 
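/*
 * Canned mode pages: 0x03 (Format Device), 0x04 (Rigid Disk Geometry) and
 * 0x0A (Control), plus an 8-byte block descriptor advertising a 512-byte
 * block length.
 */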
614 static uint8_t sbd_p3[] =
615 	{3, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 2, 0, 0, 0,
616 	    0, 0, 0, 0, 0x80, 0, 0, 0};
617 static uint8_t sbd_p4[] =
618 	{4, 0x16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
619 	    0, 0, 0, 0, 0x15, 0x18, 0, 0};
620 static uint8_t sbd_pa[] = {0xa, 0xa, 0, 0x10, 0, 0, 0, 0, 0, 0, 0, 0};
621 static uint8_t sbd_bd[] = {0, 0, 0, 0, 0, 0, 0x02, 0};
622 
623 void
624 sbd_handle_mode_sense(struct scsi_task *task,
625     struct stmf_data_buf *initial_dbuf)
626 {
627 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
628 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
629 	sbd_lu_info_t *sli = slu->sl_sli;
630 	uint32_t cmd_size, hdrsize, xfer_size, ncyl;
631 	uint8_t payload_buf[8 + 8 + 24 + 24 + 12];
632 	uint8_t *payload, *p;
633 	uint8_t ctrl, page;
634 	uint16_t ps;
635 	uint64_t s = sli->sli_lu_data_size;
636 	uint8_t dbd;
637 
638 	p = &task->task_cdb[0];
639 	page = p[2] & 0x3F;
640 	ctrl = (p[2] >> 6) & 3;
641 	dbd = p[1] & 0x08;
642 
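	/*
	 * The mode parameter header is 4 bytes for MODE SENSE(6) and 8 bytes
	 * for MODE SENSE(10).
	 */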
643 	hdrsize = (p[0] == SCMD_MODE_SENSE) ? 4 : 8;
644 
645 	cmd_size = (p[0] == SCMD_MODE_SENSE) ? p[4] :
646 	    READ_SCSI16(&p[7], uint32_t);
647 
648 	switch (page) {
649 	case 0x03:
650 		ps = hdrsize + sizeof (sbd_p3);
651 		break;
652 	case 0x04:
653 		ps = hdrsize + sizeof (sbd_p4);
654 		break;
655 	case 0x0A:
656 		ps = hdrsize + sizeof (sbd_pa);
657 		break;
658 	case MODEPAGE_ALLPAGES:
659 		ps = hdrsize + sizeof (sbd_p3) + sizeof (sbd_p4)
660 		    + sizeof (sbd_pa);
661 
662 		/*
663 		 * If the buffer is big enough, include the block
664 		 * descriptor; otherwise, leave it out.
665 		 */
666 		if (cmd_size < ps) {
667 			dbd = 1;
668 		}
669 
670 		if (dbd == 0) {
671 			ps += 8;
672 		}
673 
674 		break;
675 	default:
676 		stmf_scsilib_send_status(task, STATUS_CHECK,
677 		    STMF_SAA_INVALID_FIELD_IN_CDB);
678 		return;
679 	}
680 
681 	xfer_size = min(cmd_size, ps);
682 
683 	if ((xfer_size < hdrsize) || (ctrl == 1) ||
684 	    (((task->task_additional_flags &
685 	    TASK_AF_NO_EXPECTED_XFER_LENGTH) == 0) &&
686 	    (xfer_size > task->task_expected_xfer_length))) {
687 		stmf_scsilib_send_status(task, STATUS_CHECK,
688 		    STMF_SAA_INVALID_FIELD_IN_CDB);
689 		return;
690 	}
691 
692 	bzero(payload_buf, xfer_size);
693 
694 	if (p[0] == SCMD_MODE_SENSE) {
695 		payload_buf[0] = ps - 1;
696 	} else {
697 		ps -= 2;
698 		*((uint16_t *)payload_buf) = BE_16(ps);
699 	}
700 
701 	payload = payload_buf + hdrsize;
702 
703 	switch (page) {
704 	case 0x03:
705 		bcopy(sbd_p3, payload, sizeof (sbd_p3));
706 		break;
707 
708 	case 0x0A:
709 		bcopy(sbd_pa, payload, sizeof (sbd_pa));
710 		break;
711 
712 	case MODEPAGE_ALLPAGES:
713 		if (dbd == 0) {
714 			payload_buf[3] = sizeof (sbd_bd);
715 			bcopy(sbd_bd, payload, sizeof (sbd_bd));
716 			payload += sizeof (sbd_bd);
717 		}
718 
719 		bcopy(sbd_p3, payload, sizeof (sbd_p3));
720 		payload += sizeof (sbd_p3);
721 		bcopy(sbd_pa, payload, sizeof (sbd_pa));
722 		payload += sizeof (sbd_pa);
723 		/* FALLTHROUGH */
724 
725 	case 0x04:
726 		bcopy(sbd_p4, payload, sizeof (sbd_p4));
727 
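		/*
		 * Fabricate a plausible geometry: 16 heads for LUs larger than
		 * 1GB, otherwise 2, with 32 sectors per 512-byte-block track;
		 * the cylinder count is derived from the capacity.
		 */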
728 		if (s > 1024 * 1024 * 1024) {
729 			payload[5] = 16;
730 		} else {
731 			payload[5] = 2;
732 		}
733 		ncyl = (uint32_t)((s/(((uint64_t)payload[5]) * 32 * 512)) + 1);
734 		payload[4] = (uchar_t)ncyl;
735 		payload[3] = (uchar_t)(ncyl >> 8);
736 		payload[2] = (uchar_t)(ncyl >> 16);
737 		break;
738 
739 	}
740 
741 	sbd_handle_short_read_transfers(task, initial_dbuf, payload_buf,
742 	    cmd_size, xfer_size);
743 }
744 
745 
746 void
747 sbd_handle_inquiry(struct scsi_task *task, struct stmf_data_buf *initial_dbuf,
748 			uint8_t *p, int bsize)
749 {
750 	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
751 	uint32_t cmd_size;
752 	uint8_t page_length;
753 
754 	/*
755 	 * Basic protocol checks.
756 	 */
757 
758 	if ((((cdbp[1] & 1) == 0) && cdbp[2]) || cdbp[5]) {
759 		stmf_scsilib_send_status(task, STATUS_CHECK,
760 		    STMF_SAA_INVALID_FIELD_IN_CDB);
761 		return;
762 	}
763 
764 	/*
765 	 * A zero-byte allocation length is not an error.  Just
766 	 * return success.
767 	 */
768 
769 	cmd_size = (((uint32_t)cdbp[3]) << 8) | cdbp[4];
770 
771 	if (cmd_size == 0) {
772 		task->task_cmd_xfer_length = 0;
773 		if (task->task_additional_flags &
774 		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
775 			task->task_expected_xfer_length = 0;
776 		}
777 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
778 		return;
779 	}
780 
781 	/*
782 	 * Standard inquiry
783 	 */
784 
785 	if ((cdbp[1] & 1) == 0) {
786 		struct scsi_inquiry *inq = (struct scsi_inquiry *)p;
787 
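		/*
		 * Return 36 bytes of standard inquiry data: the 5-byte header
		 * plus an additional length (page_length) of 31.
		 */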
788 		page_length = 31;
789 		bzero(inq, page_length + 5);
790 
791 		inq->inq_dtype = 0;
792 		inq->inq_ansi = 5;	/* SPC-3 */
793 		inq->inq_hisup = 1;
794 		inq->inq_rdf = 2;	/* Response data format for SPC-3 */
795 		inq->inq_len = page_length;
796 
797 		inq->inq_tpgs = 1;
798 
799 		inq->inq_cmdque = 1;
800 
801 		(void) strncpy((char *)inq->inq_vid, "SUN     ", 8);
802 		(void) strncpy((char *)inq->inq_pid, "COMSTAR         ", 16);
803 		(void) strncpy((char *)inq->inq_revision, "1.0 ", 4);
804 
805 		sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
806 		    min(cmd_size, page_length + 5));
807 
808 		return;
809 	}
810 
811 	/*
812 	 * EVPD handling
813 	 */
814 
815 	switch (cdbp[2]) {
816 	case 0x00:
817 		page_length = 3;
818 
819 		bzero(p, page_length + 4);
820 
821 		p[0] = 0;
822 		p[3] = page_length;	/* we support 3 pages, 0, 0x83, 0x86 */
823 		p[5] = 0x83;
824 		p[6] = 0x86;
825 
826 		break;
827 
828 	case 0x83:
829 
830 		page_length = stmf_scsilib_prepare_vpd_page83(task, p,
831 		    bsize, 0, STMF_VPD_LU_ID|STMF_VPD_TARGET_ID|
832 		    STMF_VPD_TP_GROUP|STMF_VPD_RELATIVE_TP_ID) - 4;
833 		break;
834 
835 	case 0x86:
836 		page_length = 0x3c;
837 
838 		bzero(p, page_length + 4);
839 
840 		p[0] = 0;
841 		p[1] = 0x86;		/* Page 86 response */
842 		p[3] = page_length;
843 
844 		/*
845 		 * Bits 0, 1, and 2 will need to be updated
846 		 * to reflect the queue tag handling if/when
847 		 * that is implemented.  For now, we're going
848 		 * to claim support only for Simple TA.
849 		 */
850 		p[5] = 1;
851 
852 		break;
853 
854 	default:
855 		stmf_scsilib_send_status(task, STATUS_CHECK,
856 		    STMF_SAA_INVALID_FIELD_IN_CDB);
857 		return;
858 	}
859 
860 	sbd_handle_short_read_transfers(task, initial_dbuf, p, cmd_size,
861 	    min(cmd_size, page_length + 4));
862 }
863 
864 stmf_status_t
865 sbd_task_alloc(struct scsi_task *task)
866 {
867 	if ((task->task_lu_private =
868 	    kmem_alloc(sizeof (sbd_cmd_t), KM_NOSLEEP)) != NULL) {
869 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
870 		scmd->flags = 0;
871 		return (STMF_SUCCESS);
872 	}
873 	return (STMF_ALLOC_FAILURE);
874 }
875 
876 void
877 sbd_remove_it_handle(sbd_lu_t *slu, sbd_it_data_t *it)
878 {
879 	sbd_it_data_t **ppit;
880 
881 	mutex_enter(&slu->sl_it_list_lock);
882 	for (ppit = &slu->sl_it_list; *ppit != NULL;
883 					ppit = &((*ppit)->sbd_it_next)) {
884 		if ((*ppit) == it) {
885 			*ppit = it->sbd_it_next;
886 			break;
887 		}
888 	}
889 	mutex_exit(&slu->sl_it_list_lock);
890 	kmem_free(it, sizeof (*it));
891 }
892 
893 void
894 sbd_check_and_clear_scsi2_reservation(sbd_lu_t *slu, sbd_it_data_t *it)
895 {
896 	mutex_enter(&slu->sl_it_list_lock);
897 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) == 0) {
898 		/* If we don't have any reservations, just get out. */
899 		mutex_exit(&slu->sl_it_list_lock);
900 		return;
901 	}
902 
903 	if (it == NULL) {
904 		/* Find the I_T nexus which is holding the reservation. */
905 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
906 			if (it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) {
907 				ASSERT(it->sbd_it_session_id ==
908 					slu->sl_rs_owner_session_id);
909 				break;
910 			}
911 		}
912 		ASSERT(it != NULL);
913 	} else {
914 		/*
915 		 * We were passed an I_T nexus. If this nexus does not hold
916 		 * the reservation, do nothing. This is why this function is
917 		 * called "check_and_clear".
918 		 */
919 		if ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0) {
920 			mutex_exit(&slu->sl_it_list_lock);
921 			return;
922 		}
923 	}
924 	it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
925 	slu->sl_flags &= ~SBD_LU_HAS_SCSI2_RESERVATION;
926 	mutex_exit(&slu->sl_it_list_lock);
927 }
928 
929 /*
930  * Returns non-zero if this command is allowed to run even though the
931  * LU has been reserved by another initiator.
932  */
933 int
934 sbd_reserve_allow(scsi_task_t *task)
935 {
936 	uint8_t cdb0 = task->task_cdb[0];
937 	uint8_t cdb1 = task->task_cdb[1];
938 
939 	if ((cdb0 == SCMD_INQUIRY) || (cdb0 == SCMD_READ_CAPACITY) ||
940 	    ((cdb0 == SCMD_SVC_ACTION_IN_G4) &&
941 	    (cdb1 == SSVC_ACTION_READ_CAPACITY_G4))) {
942 		return (1);
943 	}
944 	return (0);
945 }
946 
947 void
948 sbd_new_task(struct scsi_task *task, struct stmf_data_buf *initial_dbuf)
949 {
950 	sbd_store_t *sst = (sbd_store_t *)task->task_lu->lu_provider_private;
951 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
952 	sbd_it_data_t *it;
953 	uint8_t cdb0, cdb1;
954 
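	/*
	 * First command on this I_T nexus: allocate per-nexus state, register
	 * it with STMF, and raise a power-on/reset unit attention.
	 */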
955 	if ((it = task->task_lu_itl_handle) == NULL) {
956 		mutex_enter(&slu->sl_it_list_lock);
957 		for (it = slu->sl_it_list; it != NULL; it = it->sbd_it_next) {
958 			if (it->sbd_it_session_id ==
959 			    task->task_session->ss_session_id) {
960 				mutex_exit(&slu->sl_it_list_lock);
961 				stmf_scsilib_send_status(task, STATUS_BUSY, 0);
962 				return;
963 			}
964 		}
965 		it = (sbd_it_data_t *)kmem_zalloc(sizeof (*it), KM_NOSLEEP);
966 		if (it == NULL) {
967 			mutex_exit(&slu->sl_it_list_lock);
968 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
969 			return;
970 		}
971 		it->sbd_it_session_id = task->task_session->ss_session_id;
972 		bcopy(task->task_lun_no, it->sbd_it_lun, 8);
973 		it->sbd_it_next = slu->sl_it_list;
974 		slu->sl_it_list = it;
975 		mutex_exit(&slu->sl_it_list_lock);
976 		if (stmf_register_itl_handle(task->task_lu, task->task_lun_no,
977 		    task->task_session, it->sbd_it_session_id, it)
978 		    != STMF_SUCCESS) {
979 			sbd_remove_it_handle(slu, it);
980 			stmf_scsilib_send_status(task, STATUS_BUSY, 0);
981 			return;
982 		}
983 		task->task_lu_itl_handle = it;
984 		it->sbd_it_ua_conditions = SBD_UA_POR;
985 	}
986 
987 	if (task->task_mgmt_function) {
988 		stmf_scsilib_handle_task_mgmt(task);
989 		return;
990 	}
991 
992 	if ((slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) &&
993 	    ((it->sbd_it_flags & SBD_IT_HAS_SCSI2_RESERVATION) == 0)) {
994 		if (!sbd_reserve_allow(task)) {
995 			stmf_scsilib_send_status(task,
996 			    STATUS_RESERVATION_CONFLICT, 0);
997 			return;
998 		}
999 	}
1000 
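	/*
	 * Report any pending unit-attention condition before processing
	 * anything other than INQUIRY.
	 */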
1001 	if ((it->sbd_it_ua_conditions) && (task->task_cdb[0] != SCMD_INQUIRY)) {
1002 		uint32_t saa = 0;
1003 
1004 		mutex_enter(&slu->sl_it_list_lock);
1005 		if (it->sbd_it_ua_conditions & SBD_UA_POR) {
1006 			it->sbd_it_ua_conditions &= ~SBD_UA_POR;
1007 			saa = STMF_SAA_POR;
1008 		} else if (it->sbd_it_ua_conditions & SBD_UA_CAPACITY_CHANGED) {
1009 			it->sbd_it_ua_conditions &= ~SBD_UA_CAPACITY_CHANGED;
1010 			if ((task->task_cdb[0] == SCMD_READ_CAPACITY) ||
1011 			    ((task->task_cdb[0] == SCMD_SVC_ACTION_IN_G4) &&
1012 			    (task->task_cdb[1] ==
1013 			    SSVC_ACTION_READ_CAPACITY_G4))) {
1014 				saa = 0;
1015 			} else {
1016 				saa = STMF_SAA_CAPACITY_DATA_HAS_CHANGED;
1017 			}
1018 		} else {
1019 			it->sbd_it_ua_conditions = 0;
1020 			saa = 0;
1021 		}
1022 		mutex_exit(&slu->sl_it_list_lock);
1023 		if (saa) {
1024 			stmf_scsilib_send_status(task, STATUS_CHECK, saa);
1025 			return;
1026 		}
1027 	}
1028 
1029 
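	/*
	 * Mask off the CDB group code so that all READ/WRITE variants
	 * (6-, 10-, 12- and 16-byte CDBs) map onto SCMD_READ/SCMD_WRITE.
	 */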
1030 	cdb0 = task->task_cdb[0] & 0x1F;
1031 
1032 	if ((cdb0 == SCMD_READ) || (cdb0 == SCMD_WRITE)) {
1033 		if (task->task_additional_flags & TASK_AF_PORT_LOAD_HIGH) {
1034 			stmf_scsilib_send_status(task, STATUS_QFULL, 0);
1035 			return;
1036 		}
1037 		if (cdb0 == SCMD_READ) {
1038 			sbd_handle_read(task, initial_dbuf);
1039 			return;
1040 		}
1041 		sbd_handle_write(task, initial_dbuf);
1042 		return;
1043 	}
1044 
1045 	cdb0 = task->task_cdb[0];
1046 	cdb1 = task->task_cdb[1];
1047 
1048 	if (cdb0 == SCMD_TEST_UNIT_READY) {	/* Test unit ready */
1049 		task->task_cmd_xfer_length = 0;
1050 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1051 		return;
1052 	}
1053 
1054 	if (cdb0 == SCMD_READ_CAPACITY) {		/* Read Capacity */
1055 		sbd_handle_read_capacity(task, initial_dbuf);
1056 		return;
1057 	}
1058 
1059 	if (cdb0 == SCMD_INQUIRY) {		/* Inquiry */
1060 		uint8_t *p;
1061 
1062 		p = (uint8_t *)kmem_zalloc(512, KM_SLEEP);
1063 		sbd_handle_inquiry(task, initial_dbuf, p, 512);
1064 		kmem_free(p, 512);
1065 		return;
1066 	}
1067 
1068 	if (cdb0 == SCMD_SVC_ACTION_IN_G4) { 	/* Read Capacity or read long */
1069 		if (cdb1 == SSVC_ACTION_READ_CAPACITY_G4) {
1070 			sbd_handle_read_capacity(task, initial_dbuf);
1071 			return;
1072 		/*
1073 		 * } else if (cdb1 == SSVC_ACTION_READ_LONG_G4) {
1074 		 * 	sbd_handle_read(task, initial_dbuf);
1075 		 * 	return;
1076 		 */
1077 		}
1078 	}
1079 
1080 	/*
1081 	 * if (cdb0 == SCMD_SVC_ACTION_OUT_G4) {
1082 	 *	if (cdb1 == SSVC_ACTION_WRITE_LONG_G4) {
1083 	 *		 sbd_handle_write(task, initial_dbuf);
1084 	 * 		return;
1085 	 *	}
1086 	 * }
1087 	 */
1088 
1089 	if (cdb0 == SCMD_START_STOP) {			/* Start stop */
1090 		/* XXX Implement power management */
1091 		task->task_cmd_xfer_length = 0;
1092 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1093 		return;
1094 	}
1095 #if 0
1096 	/* XXX Remove #if 0 above */
1097 	if ((cdb0 == SCMD_MODE_SELECT) || (cdb0 == SCMD_MODE_SELECT_G1)) {
1098 		sbd_handle_mode_select(task, initial_dbuf);
1099 		return;
1100 	}
1101 #endif
1102 	if ((cdb0 == SCMD_MODE_SENSE) || (cdb0 == SCMD_MODE_SENSE_G1)) {
1103 		sbd_handle_mode_sense(task, initial_dbuf);
1104 		return;
1105 	}
1106 
1107 	if (cdb0 == SCMD_REQUEST_SENSE) {
1108 		/*
1109 		 * The LU provider needs to store unretrieved sense data
1110 		 * (e.g. after power-on/reset).  For now, we just return
1111 		 * good status with no sense data.
1112 		 */
1113 
1114 		if ((cdb1 & ~1) || task->task_cdb[2] || task->task_cdb[3] ||
1115 		    task->task_cdb[5]) {
1116 			stmf_scsilib_send_status(task, STATUS_CHECK,
1117 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1118 		} else {
1119 			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1120 		}
1121 
1122 		return;
1123 	}
1124 
1125 	if (cdb0 == SCMD_VERIFY) {
1126 		/*
1127 		 * Something more probably needs to be done here.
1128 		 */
1129 		task->task_cmd_xfer_length = 0;
1130 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1131 		return;
1132 	}
1133 
1134 	if ((cdb0 == SCMD_RESERVE) || (cdb0 == SCMD_RELEASE)) {
1135 		if (cdb1) {
1136 			stmf_scsilib_send_status(task, STATUS_CHECK,
1137 				STMF_SAA_INVALID_FIELD_IN_CDB);
1138 			return;
1139 		}
1140 		mutex_enter(&slu->sl_it_list_lock);
1141 		if (slu->sl_flags & SBD_LU_HAS_SCSI2_RESERVATION) {
1142 			if (it->sbd_it_session_id !=
1143 			    slu->sl_rs_owner_session_id) {
1144 				/*
1145 				 * This can only happen if things were in
1146 				 * flux.
1147 				 */
1148 				mutex_exit(&slu->sl_it_list_lock);
1149 				stmf_scsilib_send_status(task,
1150 						STATUS_RESERVATION_CONFLICT, 0);
1151 				return;
1152 			}
1153 		}
1154 	}
1155 
1156 	if (cdb0 == SCMD_RELEASE) {
1157 		slu->sl_flags &= ~SBD_LU_HAS_SCSI2_RESERVATION;
1158 		it->sbd_it_flags &= ~SBD_IT_HAS_SCSI2_RESERVATION;
1159 		mutex_exit(&slu->sl_it_list_lock);
1160 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1161 		return;
1162 	}
1163 	if (cdb0 == SCMD_RESERVE) {
1164 		slu->sl_flags |= SBD_LU_HAS_SCSI2_RESERVATION;
1165 		it->sbd_it_flags |= SBD_IT_HAS_SCSI2_RESERVATION;
1166 		slu->sl_rs_owner_session_id = it->sbd_it_session_id;
1167 		mutex_exit(&slu->sl_it_list_lock);
1168 		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1169 		return;
1170 	}
1171 
1172 	if (cdb0 == SCMD_SYNCHRONIZE_CACHE ||
1173 	    cdb0 == SCMD_SYNCHRONIZE_CACHE_G4) {
1174 		sbd_handle_sync_cache(task, initial_dbuf);
1175 		return;
1176 	}
1177 
1178 	/* Report Target Port Groups */
1179 	if ((cdb0 == SCMD_MAINTENANCE_IN) &&
1180 	    ((cdb1 & 0x1F) == 0x0A)) {
1181 		stmf_scsilib_handle_report_tpgs(task, initial_dbuf);
1182 		return;
1183 	}
1184 
1185 	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
1186 }
1187 
1188 void
1189 sbd_dbuf_xfer_done(struct scsi_task *task, struct stmf_data_buf *dbuf)
1190 {
1191 	sbd_cmd_t *scmd = NULL;
1192 
1193 	scmd = (sbd_cmd_t *)task->task_lu_private;
1194 	if ((scmd == NULL) || ((scmd->flags & SBD_SCSI_CMD_ACTIVE) == 0))
1195 		return;
1196 
1197 	if (scmd->cmd_type == SBD_CMD_SCSI_READ) {
1198 		sbd_handle_read_xfer_completion(task, scmd, dbuf);
1199 	} else if (scmd->cmd_type == SBD_CMD_SCSI_WRITE) {
1200 		sbd_handle_write_xfer_completion(task, scmd, dbuf, 1);
1201 	} else if (scmd->cmd_type == SBD_CMD_SMALL_READ) {
1202 		sbd_handle_short_read_xfer_completion(task, scmd, dbuf);
1203 	} else {
1204 		cmn_err(CE_PANIC, "Unknown cmd type, task = %p", (void *)task);
1205 	}
1206 }
1207 
1208 /* ARGSUSED */
1209 void
1210 sbd_send_status_done(struct scsi_task *task)
1211 {
1212 	cmn_err(CE_PANIC,
1213 		"sbd_send_status_done: this should not have been called");
1214 }
1215 
1216 void
1217 sbd_task_free(struct scsi_task *task)
1218 {
1219 	if (task->task_lu_private) {
1220 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1221 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1222 			cmn_err(CE_PANIC, "cmd is active, task = %p",
1223 			    (void *)task);
1224 		}
1225 		kmem_free(scmd, sizeof (sbd_cmd_t));
1226 	}
1227 }
1228 
1229 /*
1230  * Aborts are synchronous w.r.t. I/O, AND
1231  * all the I/O which SBD does is synchronous, AND
1232  * everything within a task is single-threaded.
1233  *   IT MEANS
1234  * if this function is called, we are doing nothing with this task
1235  * inside the sbd module.
1236  */
1237 /* ARGSUSED */
1238 stmf_status_t
1239 sbd_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
1240 {
1241 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1242 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1243 	scsi_task_t *task;
1244 
1245 	if (abort_cmd == STMF_LU_RESET_STATE) {
1246 		return (sbd_lu_reset_state(lu));
1247 	}
1248 
1249 	if (abort_cmd == STMF_LU_ITL_HANDLE_REMOVED) {
1250 		sbd_check_and_clear_scsi2_reservation(slu,
1251 					(sbd_it_data_t *)arg);
1252 		sbd_remove_it_handle(slu, (sbd_it_data_t *)arg);
1253 		return (STMF_SUCCESS);
1254 	}
1255 
1256 	ASSERT(abort_cmd == STMF_LU_ABORT_TASK);
1257 	task = (scsi_task_t *)arg;
1258 	if (task->task_lu_private) {
1259 		sbd_cmd_t *scmd = (sbd_cmd_t *)task->task_lu_private;
1260 
1261 		if (scmd->flags & SBD_SCSI_CMD_ACTIVE) {
1262 			scmd->flags &= ~SBD_SCSI_CMD_ACTIVE;
1263 			return (STMF_ABORT_SUCCESS);
1264 		}
1265 	}
1266 
1267 	return (STMF_NOT_FOUND);
1268 }
1269 
1270 /* ARGSUSED */
1271 void
1272 sbd_ctl(struct stmf_lu *lu, int cmd, void *arg)
1273 {
1274 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1275 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1276 	stmf_change_status_t st;
1277 
1278 	ASSERT((cmd == STMF_CMD_LU_ONLINE) ||
1279 		(cmd == STMF_CMD_LU_OFFLINE) ||
1280 		(cmd == STMF_ACK_LU_ONLINE_COMPLETE) ||
1281 		(cmd == STMF_ACK_LU_OFFLINE_COMPLETE));
1282 
1283 	st.st_completion_status = STMF_SUCCESS;
1284 	st.st_additional_info = NULL;
1285 
1286 	switch (cmd) {
1287 	case STMF_CMD_LU_ONLINE:
1288 		if (slu->sl_state == STMF_STATE_ONLINE)
1289 			st.st_completion_status = STMF_ALREADY;
1290 		else if (slu->sl_state != STMF_STATE_OFFLINE)
1291 			st.st_completion_status = STMF_FAILURE;
1292 		if (st.st_completion_status == STMF_SUCCESS) {
1293 			slu->sl_state = STMF_STATE_ONLINING;
1294 			slu->sl_state_not_acked = 1;
1295 			st.st_completion_status = sst->sst_online(sst);
1296 			if (st.st_completion_status != STMF_SUCCESS) {
1297 				slu->sl_state = STMF_STATE_OFFLINE;
1298 				slu->sl_state_not_acked = 0;
1299 			} else {
1300 				slu->sl_state = STMF_STATE_ONLINE;
1301 			}
1302 		}
1303 		(void) stmf_ctl(STMF_CMD_LU_ONLINE_COMPLETE, lu, &st);
1304 		break;
1305 
1306 	case STMF_CMD_LU_OFFLINE:
1307 		if (slu->sl_state == STMF_STATE_OFFLINE)
1308 			st.st_completion_status = STMF_ALREADY;
1309 		else if (slu->sl_state != STMF_STATE_ONLINE)
1310 			st.st_completion_status = STMF_FAILURE;
1311 		if (st.st_completion_status == STMF_SUCCESS) {
1312 			slu->sl_state = STMF_STATE_OFFLINING;
1313 			slu->sl_state_not_acked = 1;
1314 			st.st_completion_status = sst->sst_offline(sst);
1315 			if (st.st_completion_status != STMF_SUCCESS) {
1316 				slu->sl_state = STMF_STATE_ONLINE;
1317 				slu->sl_state_not_acked = 0;
1318 			} else {
1319 				slu->sl_state = STMF_STATE_OFFLINE;
1320 			}
1321 		}
1322 		(void) stmf_ctl(STMF_CMD_LU_OFFLINE_COMPLETE, lu, &st);
1323 		break;
1324 
1325 	case STMF_ACK_LU_ONLINE_COMPLETE:
1326 		/* Fallthrough */
1327 	case STMF_ACK_LU_OFFLINE_COMPLETE:
1328 		slu->sl_state_not_acked = 0;
1329 		break;
1330 
1331 	}
1332 }
1333 
1334 /* ARGSUSED */
1335 stmf_status_t
1336 sbd_info(uint32_t cmd, stmf_lu_t *lu, void *arg, uint8_t *buf,
1337 						uint32_t *bufsizep)
1338 {
1339 	return (STMF_NOT_SUPPORTED);
1340 }
1341 
1342 stmf_status_t
1343 sbd_lu_reset_state(stmf_lu_t *lu)
1344 {
1345 	sbd_store_t *sst = (sbd_store_t *)lu->lu_provider_private;
1346 	sbd_lu_t *slu = (sbd_lu_t *)sst->sst_sbd_private;
1347 
1348 	sbd_check_and_clear_scsi2_reservation(slu, NULL);
1349 	if (stmf_deregister_all_lu_itl_handles(lu) != STMF_SUCCESS) {
1350 		return (STMF_FAILURE);
1351 	}
1352 	return (STMF_SUCCESS);
1353 }
1354 
1355 /* ARGSUSED */
1356 static void
1357 sbd_handle_sync_cache(struct scsi_task *task,
1358     struct stmf_data_buf *initial_dbuf)
1359 {
1360 	sbd_store_t	*sst =
1361 	    (sbd_store_t *)task->task_lu->lu_provider_private;
1362 	sbd_lu_t	*slu = (sbd_lu_t *)sst->sst_sbd_private;
1363 	uint64_t	lba, laddr;
1364 	uint32_t	len;
1365 	int		is_g4 = 0;
1366 	int		immed;
1367 
1368 	/*
1369 	 * Determine if this is a 10 or 16 byte CDB
1370 	 */
1371 
1372 	if (task->task_cdb[0] == SCMD_SYNCHRONIZE_CACHE_G4)
1373 		is_g4 = 1;
1374 
1375 	/*
1376 	 * Determine other requested parameters
1377 	 *
1378 	 * We don't have a non-volatile cache, so we don't care about SYNC_NV.
1379 	 * We do not support the IMMED bit.
1380 	 */
1381 
1382 	immed = (task->task_cdb[1] & 0x02);
1383 
1384 	if (immed) {
1385 		stmf_scsilib_send_status(task, STATUS_CHECK,
1386 		    STMF_SAA_INVALID_FIELD_IN_CDB);
1387 		return;
1388 	}
1389 
1390 	/*
1391 	 * Check to be sure we're not being asked to sync an LBA
1392 	 * that is out of range.  While checking, verify reserved fields.
1393 	 */
1394 
1395 	if (is_g4) {
1396 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[14] ||
1397 		    task->task_cdb[15]) {
1398 			stmf_scsilib_send_status(task, STATUS_CHECK,
1399 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1400 			return;
1401 		}
1402 
1403 		lba = READ_SCSI64(&task->task_cdb[2], uint64_t);
1404 		len = READ_SCSI32(&task->task_cdb[10], uint32_t);
1405 	} else {
1406 		if ((task->task_cdb[1] & 0xf9) || task->task_cdb[6] ||
1407 		    task->task_cdb[9]) {
1408 			stmf_scsilib_send_status(task, STATUS_CHECK,
1409 			    STMF_SAA_INVALID_FIELD_IN_CDB);
1410 			return;
1411 		}
1412 
1413 		lba = READ_SCSI32(&task->task_cdb[2], uint64_t);
1414 		len = READ_SCSI16(&task->task_cdb[7], uint32_t);
1415 	}
1416 
1417 	laddr = lba << slu->sl_shift_count;
1418 	len <<= slu->sl_shift_count;
1419 
1420 	if ((laddr + (uint64_t)len) > slu->sl_sli->sli_lu_data_size) {
1421 		stmf_scsilib_send_status(task, STATUS_CHECK,
1422 		    STMF_SAA_LBA_OUT_OF_RANGE);
1423 		return;
1424 	}
1425 
1426 	if (sst->sst_data_flush(sst) != STMF_SUCCESS) {
1427 		stmf_scsilib_send_status(task, STATUS_CHECK,
1428 		    STMF_SAA_WRITE_ERROR);
1429 		return;
1430 	}
1431 
1432 	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
1433 }
1434