/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.0e of the NVMe specification.
 * It may work with newer versions, but that is completely untested and disabled
 * by default.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a FIXED interrupt while configuring the device as the
 * specification requires. Later in the attach process it will switch to MSI-X
 * or MSI if supported. The driver wants to have one interrupt vector per CPU,
 * but it will work correctly if fewer are available. Interrupts can be shared
 * by queues; the interrupt handler will iterate through the I/O queue array by
 * steps of n_intr_cnt. Usually only the admin queue will share an interrupt
 * with one I/O queue. The interrupt handler will retrieve completed commands
 * from all queues sharing an interrupt vector and will post them to a taskq
 * for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65536 I/O queue pairs, with each queue holding up
 * to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that can
 * hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to the submission side of a queue pair and the shared state
 * is protected by nq_mutex. The completion side of a queue pair does not need
 * that protection apart from its access to the shared state; it is called only
 * in the interrupt handler which does not run concurrently for the same
 * interrupt vector.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array. The
 * array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the queue
 * wraps around in that time a submission may find the next array slot to still
 * be used by a long-running command. In this case the array is sequentially
 * searched for the next free slot. The length of the command array is the same
 * as the configured queue length.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store.
 * The driver supports multiple namespaces and creates a blkdev interface
 * for each namespace found. Namespaces can have various attributes to support
 * thin provisioning, extended LBAs, and protection information. This driver
 * does not support any of this and ignores namespaces that have these
 * attributes.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with presenting
 * a disk device to the system. As a result, the processing of I/O requests is
 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
 * setup, and splitting of transfers into manageable chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted to
 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
 * queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the namespace
 * format back to blkdev as physical block size to support partition and block
 * alignment. The devid is composed using the device vendor ID, model number,
 * serial number, and the namespace ID (see the illustrative example below).
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off,
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests. Before
 * this limit is known the driver assumes it is at least 1 and posts a single
 * asynchronous request. Later when the limit is known more asynchronous event
 * requests are posted to allow quicker reception of error information. When an
 * asynchronous event is posted by the hardware the driver will parse the error
 * status fields and log information or fault the device, depending on the
 * severity of the asynchronous event. The asynchronous event request is then
 * reused and posted to the admin queue again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware appears
 * to be healthy the driver attempts to abort the command. If this fails the
 * driver assumes the device to be dead, fences it off, and calls FMA to retire
 * it. In general admin commands are issued at attach time only. No timeout
 * handling of normal I/O commands is presently done.
 *
 * In some cases it may be possible that the ABORT command times out, too. In
 * that case the device is also declared dead and fenced off.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry point
 * is still provided which is used to send a shutdown notification to the
 * device.
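 *
 *
 * Devid Example:
 *
 * As a purely illustrative example of the devid format described under
 * "Blkdev Interface" above (all values are made up): a controller with PCI
 * vendor ID 0x8086, model "ACME X1", serial number "SN0001", and namespace
 * ID 1 would be composed into the devid "8086-ACME X1-SN0001-1".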
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of the
 * driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-queue-len: the maximum length of the I/O queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to be
 *   posted by the driver
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - polled I/O support to support kernel core dumping
 * - FMA handling of media errors
 * - support for the Volatile Write Cache
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for querying log pages from user space
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>

#include "nvme_reg.h"
#include "nvme_var.h"


/* NVMe spec version supported */
static const int nvme_version_major = 1;
static const int nvme_version_minor = 0;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static int nvme_admin_cmd(nvme_cmd_t *, int);
static int nvme_submit_cmd(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static boolean_t nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static void nvme_abort_cmd(nvme_cmd_t *);
static int nvme_async_event(nvme_t *);
static
void *nvme_get_logpage(nvme_t *, uint8_t, ...); 225 static void *nvme_identify(nvme_t *, uint32_t); 226 static int nvme_set_nqueues(nvme_t *, uint16_t); 227 228 static void nvme_free_dma(nvme_dma_t *); 229 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *, 230 nvme_dma_t **); 231 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t, 232 nvme_dma_t **); 233 static void nvme_free_qpair(nvme_qpair_t *); 234 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int); 235 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t); 236 237 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t); 238 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t); 239 static inline uint64_t nvme_get64(nvme_t *, uintptr_t); 240 static inline uint32_t nvme_get32(nvme_t *, uintptr_t); 241 242 static boolean_t nvme_check_regs_hdl(nvme_t *); 243 static boolean_t nvme_check_dma_hdl(nvme_dma_t *); 244 245 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *); 246 247 static void nvme_bd_xfer_done(void *); 248 static void nvme_bd_driveinfo(void *, bd_drive_t *); 249 static int nvme_bd_mediainfo(void *, bd_media_t *); 250 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t); 251 static int nvme_bd_read(void *, bd_xfer_t *); 252 static int nvme_bd_write(void *, bd_xfer_t *); 253 static int nvme_bd_sync(void *, bd_xfer_t *); 254 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *); 255 256 static void nvme_prepare_devid(nvme_t *, uint32_t); 257 258 static void *nvme_state; 259 static kmem_cache_t *nvme_cmd_cache; 260 261 /* 262 * DMA attributes for queue DMA memory 263 * 264 * Queue DMA memory must be page aligned. The maximum length of a queue is 265 * 65536 entries, and an entry can be 64 bytes long. 266 */ 267 static ddi_dma_attr_t nvme_queue_dma_attr = { 268 .dma_attr_version = DMA_ATTR_V0, 269 .dma_attr_addr_lo = 0, 270 .dma_attr_addr_hi = 0xffffffffffffffffULL, 271 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t), 272 .dma_attr_align = 0x1000, 273 .dma_attr_burstsizes = 0x7ff, 274 .dma_attr_minxfer = 0x1000, 275 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t), 276 .dma_attr_seg = 0xffffffffffffffffULL, 277 .dma_attr_sgllen = 1, 278 .dma_attr_granular = 1, 279 .dma_attr_flags = 0, 280 }; 281 282 /* 283 * DMA attributes for transfers using Physical Region Page (PRP) entries 284 * 285 * A PRP entry describes one page of DMA memory using the page size specified 286 * in the controller configuration's memory page size register (CC.MPS). It uses 287 * a 64bit base address aligned to this page size. There is no limitation on 288 * chaining PRPs together for arbitrarily large DMA transfers. 289 */ 290 static ddi_dma_attr_t nvme_prp_dma_attr = { 291 .dma_attr_version = DMA_ATTR_V0, 292 .dma_attr_addr_lo = 0, 293 .dma_attr_addr_hi = 0xffffffffffffffffULL, 294 .dma_attr_count_max = 0xfff, 295 .dma_attr_align = 0x1000, 296 .dma_attr_burstsizes = 0x7ff, 297 .dma_attr_minxfer = 0x1000, 298 .dma_attr_maxxfer = 0x1000, 299 .dma_attr_seg = 0xffffffffffffffffULL, 300 .dma_attr_sgllen = -1, 301 .dma_attr_granular = 1, 302 .dma_attr_flags = 0, 303 }; 304 305 /* 306 * DMA attributes for transfers using scatter/gather lists 307 * 308 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a 309 * 32bit length field. SGL Segment and SGL Last Segment entries require the 310 * length to be a multiple of 16 bytes. 
311 */ 312 static ddi_dma_attr_t nvme_sgl_dma_attr = { 313 .dma_attr_version = DMA_ATTR_V0, 314 .dma_attr_addr_lo = 0, 315 .dma_attr_addr_hi = 0xffffffffffffffffULL, 316 .dma_attr_count_max = 0xffffffffUL, 317 .dma_attr_align = 1, 318 .dma_attr_burstsizes = 0x7ff, 319 .dma_attr_minxfer = 0x10, 320 .dma_attr_maxxfer = 0xfffffffffULL, 321 .dma_attr_seg = 0xffffffffffffffffULL, 322 .dma_attr_sgllen = -1, 323 .dma_attr_granular = 0x10, 324 .dma_attr_flags = 0 325 }; 326 327 static ddi_device_acc_attr_t nvme_reg_acc_attr = { 328 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 329 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 330 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 331 }; 332 333 static struct dev_ops nvme_dev_ops = { 334 .devo_rev = DEVO_REV, 335 .devo_refcnt = 0, 336 .devo_getinfo = ddi_no_info, 337 .devo_identify = nulldev, 338 .devo_probe = nulldev, 339 .devo_attach = nvme_attach, 340 .devo_detach = nvme_detach, 341 .devo_reset = nodev, 342 .devo_cb_ops = NULL, 343 .devo_bus_ops = NULL, 344 .devo_power = NULL, 345 .devo_quiesce = nvme_quiesce, 346 }; 347 348 static struct modldrv nvme_modldrv = { 349 .drv_modops = &mod_driverops, 350 .drv_linkinfo = "NVMe v1.0e", 351 .drv_dev_ops = &nvme_dev_ops 352 }; 353 354 static struct modlinkage nvme_modlinkage = { 355 .ml_rev = MODREV_1, 356 .ml_linkage = { &nvme_modldrv, NULL } 357 }; 358 359 static bd_ops_t nvme_bd_ops = { 360 .o_version = BD_OPS_VERSION_0, 361 .o_drive_info = nvme_bd_driveinfo, 362 .o_media_info = nvme_bd_mediainfo, 363 .o_devid_init = nvme_bd_devid, 364 .o_sync_cache = nvme_bd_sync, 365 .o_read = nvme_bd_read, 366 .o_write = nvme_bd_write, 367 }; 368 369 int 370 _init(void) 371 { 372 int error; 373 374 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1); 375 if (error != DDI_SUCCESS) 376 return (error); 377 378 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache", 379 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 380 381 bd_mod_init(&nvme_dev_ops); 382 383 error = mod_install(&nvme_modlinkage); 384 if (error != DDI_SUCCESS) { 385 ddi_soft_state_fini(&nvme_state); 386 bd_mod_fini(&nvme_dev_ops); 387 } 388 389 return (error); 390 } 391 392 int 393 _fini(void) 394 { 395 int error; 396 397 error = mod_remove(&nvme_modlinkage); 398 if (error == DDI_SUCCESS) { 399 ddi_soft_state_fini(&nvme_state); 400 kmem_cache_destroy(nvme_cmd_cache); 401 bd_mod_fini(&nvme_dev_ops); 402 } 403 404 return (error); 405 } 406 407 int 408 _info(struct modinfo *modinfop) 409 { 410 return (mod_info(&nvme_modlinkage, modinfop)); 411 } 412 413 static inline void 414 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) 415 { 416 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 417 418 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 419 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); 420 } 421 422 static inline void 423 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) 424 { 425 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 426 427 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 428 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); 429 } 430 431 static inline uint64_t 432 nvme_get64(nvme_t *nvme, uintptr_t reg) 433 { 434 uint64_t val; 435 436 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 437 438 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 439 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); 440 441 return (val); 442 } 443 444 static inline uint32_t 445 nvme_get32(nvme_t *nvme, uintptr_t reg) 446 { 447 uint32_t val; 448 449 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 450 451 /*LINTED: 
E_BAD_PTR_CAST_ALIGN*/ 452 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); 453 454 return (val); 455 } 456 457 static boolean_t 458 nvme_check_regs_hdl(nvme_t *nvme) 459 { 460 ddi_fm_error_t error; 461 462 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); 463 464 if (error.fme_status != DDI_FM_OK) 465 return (B_TRUE); 466 467 return (B_FALSE); 468 } 469 470 static boolean_t 471 nvme_check_dma_hdl(nvme_dma_t *dma) 472 { 473 ddi_fm_error_t error; 474 475 if (dma == NULL) 476 return (B_FALSE); 477 478 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION); 479 480 if (error.fme_status != DDI_FM_OK) 481 return (B_TRUE); 482 483 return (B_FALSE); 484 } 485 486 static void 487 nvme_free_dma(nvme_dma_t *dma) 488 { 489 if (dma->nd_dmah != NULL) 490 (void) ddi_dma_unbind_handle(dma->nd_dmah); 491 if (dma->nd_acch != NULL) 492 ddi_dma_mem_free(&dma->nd_acch); 493 if (dma->nd_dmah != NULL) 494 ddi_dma_free_handle(&dma->nd_dmah); 495 kmem_free(dma, sizeof (nvme_dma_t)); 496 } 497 498 static int 499 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, 500 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret) 501 { 502 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP); 503 504 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, 505 &dma->nd_dmah) != DDI_SUCCESS) { 506 /* 507 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and 508 * the only other possible error is DDI_DMA_BADATTR which 509 * indicates a driver bug which should cause a panic. 510 */ 511 dev_err(nvme->n_dip, CE_PANIC, 512 "!failed to get DMA handle, check DMA attributes"); 513 return (DDI_FAILURE); 514 } 515 516 /* 517 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified 518 * or the flags are conflicting, which isn't the case here. 
519 */ 520 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, 521 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp, 522 &dma->nd_len, &dma->nd_acch); 523 524 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp, 525 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 526 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) { 527 dev_err(nvme->n_dip, CE_WARN, 528 "!failed to bind DMA memory"); 529 atomic_inc_32(&nvme->n_dma_bind_err); 530 *ret = NULL; 531 nvme_free_dma(dma); 532 return (DDI_FAILURE); 533 } 534 535 bzero(dma->nd_memp, dma->nd_len); 536 537 *ret = dma; 538 return (DDI_SUCCESS); 539 } 540 541 static int 542 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, 543 uint_t flags, nvme_dma_t **dma) 544 { 545 uint32_t len = nentry * qe_len; 546 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; 547 548 len = roundup(len, nvme->n_pagesize); 549 550 q_dma_attr.dma_attr_minxfer = len; 551 552 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) 553 != DDI_SUCCESS) { 554 dev_err(nvme->n_dip, CE_WARN, 555 "!failed to get DMA memory for queue"); 556 goto fail; 557 } 558 559 if ((*dma)->nd_ncookie != 1) { 560 dev_err(nvme->n_dip, CE_WARN, 561 "!got too many cookies for queue DMA"); 562 goto fail; 563 } 564 565 return (DDI_SUCCESS); 566 567 fail: 568 if (*dma) { 569 nvme_free_dma(*dma); 570 *dma = NULL; 571 } 572 573 return (DDI_FAILURE); 574 } 575 576 static void 577 nvme_free_qpair(nvme_qpair_t *qp) 578 { 579 int i; 580 581 mutex_destroy(&qp->nq_mutex); 582 583 if (qp->nq_sqdma != NULL) 584 nvme_free_dma(qp->nq_sqdma); 585 if (qp->nq_cqdma != NULL) 586 nvme_free_dma(qp->nq_cqdma); 587 588 if (qp->nq_active_cmds > 0) 589 for (i = 0; i != qp->nq_nentry; i++) 590 if (qp->nq_cmd[i] != NULL) 591 nvme_free_cmd(qp->nq_cmd[i]); 592 593 if (qp->nq_cmd != NULL) 594 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry); 595 596 kmem_free(qp, sizeof (nvme_qpair_t)); 597 } 598 599 static int 600 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 601 int idx) 602 { 603 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 604 605 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 606 DDI_INTR_PRI(nvme->n_intr_pri)); 607 608 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 609 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 610 goto fail; 611 612 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 613 DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS) 614 goto fail; 615 616 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 617 qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp; 618 qp->nq_nentry = nentry; 619 620 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); 621 qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx); 622 623 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 624 qp->nq_next_cmd = 0; 625 626 *nqp = qp; 627 return (DDI_SUCCESS); 628 629 fail: 630 nvme_free_qpair(qp); 631 *nqp = NULL; 632 633 return (DDI_FAILURE); 634 } 635 636 static nvme_cmd_t * 637 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 638 { 639 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 640 641 if (cmd == NULL) 642 return (cmd); 643 644 bzero(cmd, sizeof (nvme_cmd_t)); 645 646 cmd->nc_nvme = nvme; 647 648 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 649 DDI_INTR_PRI(nvme->n_intr_pri)); 650 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 651 652 return (cmd); 653 } 654 655 static void 656 nvme_free_cmd(nvme_cmd_t *cmd) 657 { 658 if (cmd->nc_dma) { 659 nvme_free_dma(cmd->nc_dma); 660 cmd->nc_dma = NULL; 661 } 662 663 
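	/*
	 * Tear down the per-command synchronization objects before the
	 * command is returned to the kmem cache.
	 */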
cv_destroy(&cmd->nc_cv); 664 mutex_destroy(&cmd->nc_mutex); 665 666 kmem_cache_free(nvme_cmd_cache, cmd); 667 } 668 669 static int 670 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 671 { 672 nvme_reg_sqtdbl_t tail = { 0 }; 673 674 mutex_enter(&qp->nq_mutex); 675 676 if (qp->nq_active_cmds == qp->nq_nentry) { 677 mutex_exit(&qp->nq_mutex); 678 return (DDI_FAILURE); 679 } 680 681 cmd->nc_completed = B_FALSE; 682 683 /* 684 * Try to insert the cmd into the active cmd array at the nq_next_cmd 685 * slot. If the slot is already occupied advance to the next slot and 686 * try again. This can happen for long running commands like async event 687 * requests. 688 */ 689 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 690 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 691 qp->nq_cmd[qp->nq_next_cmd] = cmd; 692 693 qp->nq_active_cmds++; 694 695 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 696 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 697 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 698 sizeof (nvme_sqe_t) * qp->nq_sqtail, 699 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 700 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 701 702 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 703 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 704 705 mutex_exit(&qp->nq_mutex); 706 return (DDI_SUCCESS); 707 } 708 709 static nvme_cmd_t * 710 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 711 { 712 nvme_reg_cqhdbl_t head = { 0 }; 713 714 nvme_cqe_t *cqe; 715 nvme_cmd_t *cmd; 716 717 (void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0, 718 sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL); 719 720 cqe = &qp->nq_cq[qp->nq_cqhead]; 721 722 /* Check phase tag of CQE. Hardware inverts it for new entries. */ 723 if (cqe->cqe_sf.sf_p == qp->nq_phase) 724 return (NULL); 725 726 ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp); 727 ASSERT(cqe->cqe_cid < qp->nq_nentry); 728 729 mutex_enter(&qp->nq_mutex); 730 cmd = qp->nq_cmd[cqe->cqe_cid]; 731 qp->nq_cmd[cqe->cqe_cid] = NULL; 732 qp->nq_active_cmds--; 733 mutex_exit(&qp->nq_mutex); 734 735 ASSERT(cmd != NULL); 736 ASSERT(cmd->nc_nvme == nvme); 737 ASSERT(cmd->nc_sqid == cqe->cqe_sqid); 738 ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid); 739 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 740 741 qp->nq_sqhead = cqe->cqe_sqhd; 742 743 head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry; 744 745 /* Toggle phase on wrap-around. */ 746 if (qp->nq_cqhead == 0) 747 qp->nq_phase = qp->nq_phase ? 
	    0 : 1;

	nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid opcode in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid field in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid NS/format in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error.
The spec (v1.0, section 4.5.1.2) says 871 * detailed error information is returned as async event, 872 * so we pretty much ignore the error here and handle it 873 * in the async event handler. 874 */ 875 atomic_inc_32(&cmd->nc_nvme->n_internal_err); 876 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 877 return (EIO); 878 879 case NVME_CQE_SC_GEN_ABORT_REQUEST: 880 /* 881 * Command Abort Requested. This normally happens only when a 882 * command times out. 883 */ 884 /* TODO: post ereport or change blkdev to handle this? */ 885 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err); 886 return (ECANCELED); 887 888 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 889 /* Command Aborted due to Power Loss Notification */ 890 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 891 cmd->nc_nvme->n_dead = B_TRUE; 892 return (EIO); 893 894 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 895 /* Command Aborted due to SQ Deletion */ 896 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del); 897 return (EIO); 898 899 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 900 /* Capacity Exceeded */ 901 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc); 902 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 903 return (EIO); 904 905 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 906 /* Namespace Not Ready */ 907 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy); 908 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 909 return (EIO); 910 911 default: 912 return (nvme_check_unknown_cmd_status(cmd)); 913 } 914 } 915 916 static int 917 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 918 { 919 nvme_cqe_t *cqe = &cmd->nc_cqe; 920 921 switch (cqe->cqe_sf.sf_sc) { 922 case NVME_CQE_SC_SPC_INV_CQ: 923 /* Completion Queue Invalid */ 924 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 925 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err); 926 return (EINVAL); 927 928 case NVME_CQE_SC_SPC_INV_QID: 929 /* Invalid Queue Identifier */ 930 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 931 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 932 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 933 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 934 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err); 935 return (EINVAL); 936 937 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 938 /* Max Queue Size Exceeded */ 939 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 940 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 941 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc); 942 return (EINVAL); 943 944 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 945 /* Abort Command Limit Exceeded */ 946 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 947 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 948 "abort command limit exceeded in cmd %p", (void *)cmd); 949 return (0); 950 951 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 952 /* Async Event Request Limit Exceeded */ 953 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 954 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 955 "async event request limit exceeded in cmd %p", 956 (void *)cmd); 957 return (0); 958 959 case NVME_CQE_SC_SPC_INV_INT_VECT: 960 /* Invalid Interrupt Vector */ 961 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 962 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect); 963 return (EINVAL); 964 965 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 966 /* Invalid Log Page */ 967 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 968 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page); 969 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 970 return (EINVAL); 971 972 case NVME_CQE_SC_SPC_INV_FORMAT: 973 /* Invalid Format */ 974 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT); 975 
atomic_inc_32(&cmd->nc_nvme->n_inv_format); 976 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 977 return (EINVAL); 978 979 case NVME_CQE_SC_SPC_INV_Q_DEL: 980 /* Invalid Queue Deletion */ 981 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 982 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del); 983 return (EINVAL); 984 985 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 986 /* Conflicting Attributes */ 987 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 988 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 989 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 990 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr); 991 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 992 return (EINVAL); 993 994 case NVME_CQE_SC_SPC_NVM_INV_PROT: 995 /* Invalid Protection Information */ 996 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 997 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 998 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 999 atomic_inc_32(&cmd->nc_nvme->n_inv_prot); 1000 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1001 return (EINVAL); 1002 1003 case NVME_CQE_SC_SPC_NVM_READONLY: 1004 /* Write to Read Only Range */ 1005 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1006 atomic_inc_32(&cmd->nc_nvme->n_readonly); 1007 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1008 return (EROFS); 1009 1010 default: 1011 return (nvme_check_unknown_cmd_status(cmd)); 1012 } 1013 } 1014 1015 static inline int 1016 nvme_check_cmd_status(nvme_cmd_t *cmd) 1017 { 1018 nvme_cqe_t *cqe = &cmd->nc_cqe; 1019 1020 /* take a shortcut if everything is alright */ 1021 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1022 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 1023 return (0); 1024 1025 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 1026 return (nvme_check_generic_cmd_status(cmd)); 1027 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 1028 return (nvme_check_specific_cmd_status(cmd)); 1029 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 1030 return (nvme_check_integrity_cmd_status(cmd)); 1031 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 1032 return (nvme_check_vendor_cmd_status(cmd)); 1033 1034 return (nvme_check_unknown_cmd_status(cmd)); 1035 } 1036 1037 /* 1038 * nvme_abort_cmd_cb -- replaces nc_callback of aborted commands 1039 * 1040 * This functions takes care of cleaning up aborted commands. The command 1041 * status is checked to catch any fatal errors. 1042 */ 1043 static void 1044 nvme_abort_cmd_cb(void *arg) 1045 { 1046 nvme_cmd_t *cmd = arg; 1047 1048 /* 1049 * Grab the command mutex. Once we have it we hold the last reference 1050 * to the command and can safely free it. 1051 */ 1052 mutex_enter(&cmd->nc_mutex); 1053 (void) nvme_check_cmd_status(cmd); 1054 mutex_exit(&cmd->nc_mutex); 1055 1056 nvme_free_cmd(cmd); 1057 } 1058 1059 static void 1060 nvme_abort_cmd(nvme_cmd_t *abort_cmd) 1061 { 1062 nvme_t *nvme = abort_cmd->nc_nvme; 1063 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1064 nvme_abort_cmd_t ac = { 0 }; 1065 1066 sema_p(&nvme->n_abort_sema); 1067 1068 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 1069 ac.b.ac_sqid = abort_cmd->nc_sqid; 1070 1071 /* 1072 * Drop the mutex of the aborted command. From this point on 1073 * we must assume that the abort callback has freed the command. 1074 */ 1075 mutex_exit(&abort_cmd->nc_mutex); 1076 1077 cmd->nc_sqid = 0; 1078 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 1079 cmd->nc_callback = nvme_wakeup_cmd; 1080 cmd->nc_sqe.sqe_cdw10 = ac.r; 1081 1082 /* 1083 * Send the ABORT to the hardware. The ABORT command will return _after_ 1084 * the aborted command has completed (aborted or otherwise). 
1085 */ 1086 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1087 sema_v(&nvme->n_abort_sema); 1088 dev_err(nvme->n_dip, CE_WARN, 1089 "!nvme_admin_cmd failed for ABORT"); 1090 atomic_inc_32(&nvme->n_abort_failed); 1091 return; 1092 } 1093 sema_v(&nvme->n_abort_sema); 1094 1095 if (nvme_check_cmd_status(cmd)) { 1096 dev_err(nvme->n_dip, CE_WARN, 1097 "!ABORT failed with sct = %x, sc = %x", 1098 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1099 atomic_inc_32(&nvme->n_abort_failed); 1100 } else { 1101 atomic_inc_32(&nvme->n_cmd_aborted); 1102 } 1103 1104 nvme_free_cmd(cmd); 1105 } 1106 1107 /* 1108 * nvme_wait_cmd -- wait for command completion or timeout 1109 * 1110 * Returns B_TRUE if the command completed normally. 1111 * 1112 * Returns B_FALSE if the command timed out and an abort was attempted. The 1113 * command mutex will be dropped and the command must be considered freed. The 1114 * freeing of the command is normally done by the abort command callback. 1115 * 1116 * In case of a serious error or a timeout of the abort command the hardware 1117 * will be declared dead and FMA will be notified. 1118 */ 1119 static boolean_t 1120 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t usec) 1121 { 1122 clock_t timeout = ddi_get_lbolt() + drv_usectohz(usec); 1123 nvme_t *nvme = cmd->nc_nvme; 1124 nvme_reg_csts_t csts; 1125 1126 ASSERT(mutex_owned(&cmd->nc_mutex)); 1127 1128 while (!cmd->nc_completed) { 1129 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 1130 break; 1131 } 1132 1133 if (cmd->nc_completed) 1134 return (B_TRUE); 1135 1136 /* 1137 * The command timed out. Change the callback to the cleanup function. 1138 */ 1139 cmd->nc_callback = nvme_abort_cmd_cb; 1140 1141 /* 1142 * Check controller for fatal status, any errors associated with the 1143 * register or DMA handle, or for a double timeout (abort command timed 1144 * out). If necessary log a warning and call FMA. 1145 */ 1146 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1147 dev_err(nvme->n_dip, CE_WARN, "!command timeout, " 1148 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 1149 atomic_inc_32(&nvme->n_cmd_timeout); 1150 1151 if (csts.b.csts_cfs || 1152 nvme_check_regs_hdl(nvme) || 1153 nvme_check_dma_hdl(cmd->nc_dma) || 1154 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 1155 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1156 nvme->n_dead = B_TRUE; 1157 mutex_exit(&cmd->nc_mutex); 1158 } else { 1159 /* 1160 * Try to abort the command. The command mutex is released by 1161 * nvme_abort_cmd(). 1162 * If the abort succeeds it will have freed the aborted command. 1163 * If the abort fails for other reasons we must assume that the 1164 * command may complete at any time, and the callback will free 1165 * it for us. 1166 */ 1167 nvme_abort_cmd(cmd); 1168 } 1169 1170 return (B_FALSE); 1171 } 1172 1173 static void 1174 nvme_wakeup_cmd(void *arg) 1175 { 1176 nvme_cmd_t *cmd = arg; 1177 1178 mutex_enter(&cmd->nc_mutex); 1179 /* 1180 * There is a slight chance that this command completed shortly after 1181 * the timeout was hit in nvme_wait_cmd() but before the callback was 1182 * changed. Catch that case here and clean up accordingly. 
1183 */ 1184 if (cmd->nc_callback == nvme_abort_cmd_cb) { 1185 mutex_exit(&cmd->nc_mutex); 1186 nvme_abort_cmd_cb(cmd); 1187 return; 1188 } 1189 1190 cmd->nc_completed = B_TRUE; 1191 cv_signal(&cmd->nc_cv); 1192 mutex_exit(&cmd->nc_mutex); 1193 } 1194 1195 static void 1196 nvme_async_event_task(void *arg) 1197 { 1198 nvme_cmd_t *cmd = arg; 1199 nvme_t *nvme = cmd->nc_nvme; 1200 nvme_error_log_entry_t *error_log = NULL; 1201 nvme_health_log_t *health_log = NULL; 1202 nvme_async_event_t event; 1203 int ret; 1204 1205 /* 1206 * Check for errors associated with the async request itself. The only 1207 * command-specific error is "async event limit exceeded", which 1208 * indicates a programming error in the driver and causes a panic in 1209 * nvme_check_cmd_status(). 1210 * 1211 * Other possible errors are various scenarios where the async request 1212 * was aborted, or internal errors in the device. Internal errors are 1213 * reported to FMA, the command aborts need no special handling here. 1214 */ 1215 if (nvme_check_cmd_status(cmd)) { 1216 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1217 "!async event request returned failure, sct = %x, " 1218 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 1219 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 1220 cmd->nc_cqe.cqe_sf.sf_m); 1221 1222 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1223 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 1224 cmd->nc_nvme->n_dead = B_TRUE; 1225 ddi_fm_service_impact(cmd->nc_nvme->n_dip, 1226 DDI_SERVICE_LOST); 1227 } 1228 nvme_free_cmd(cmd); 1229 return; 1230 } 1231 1232 1233 event.r = cmd->nc_cqe.cqe_dw0; 1234 1235 /* Clear CQE and re-submit the async request. */ 1236 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1237 ret = nvme_submit_cmd(nvme->n_adminq, cmd); 1238 1239 if (ret != DDI_SUCCESS) { 1240 dev_err(nvme->n_dip, CE_WARN, 1241 "!failed to resubmit async event request"); 1242 atomic_inc_32(&nvme->n_async_resubmit_failed); 1243 nvme_free_cmd(cmd); 1244 } 1245 1246 switch (event.b.ae_type) { 1247 case NVME_ASYNC_TYPE_ERROR: 1248 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1249 error_log = (nvme_error_log_entry_t *) 1250 nvme_get_logpage(nvme, event.b.ae_logpage); 1251 } else { 1252 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1253 "async event reply: %d", event.b.ae_logpage); 1254 atomic_inc_32(&nvme->n_wrong_logpage); 1255 } 1256 1257 switch (event.b.ae_info) { 1258 case NVME_ASYNC_ERROR_INV_SQ: 1259 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1260 "invalid submission queue"); 1261 return; 1262 1263 case NVME_ASYNC_ERROR_INV_DBL: 1264 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1265 "invalid doorbell write value"); 1266 return; 1267 1268 case NVME_ASYNC_ERROR_DIAGFAIL: 1269 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1270 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1271 nvme->n_dead = B_TRUE; 1272 atomic_inc_32(&nvme->n_diagfail_event); 1273 break; 1274 1275 case NVME_ASYNC_ERROR_PERSISTENT: 1276 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1277 "device error"); 1278 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1279 nvme->n_dead = B_TRUE; 1280 atomic_inc_32(&nvme->n_persistent_event); 1281 break; 1282 1283 case NVME_ASYNC_ERROR_TRANSIENT: 1284 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1285 "device error"); 1286 /* TODO: send ereport */ 1287 atomic_inc_32(&nvme->n_transient_event); 1288 break; 1289 1290 case NVME_ASYNC_ERROR_FW_LOAD: 1291 dev_err(nvme->n_dip, CE_WARN, 1292 "!firmware image load error"); 
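			/*
			 * Unlike the diagnostic and persistent internal errors
			 * above, a firmware image load error is only logged
			 * and counted; the device is not faulted.
			 */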
1293 atomic_inc_32(&nvme->n_fw_load_event); 1294 break; 1295 } 1296 break; 1297 1298 case NVME_ASYNC_TYPE_HEALTH: 1299 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1300 health_log = (nvme_health_log_t *) 1301 nvme_get_logpage(nvme, event.b.ae_logpage, -1); 1302 } else { 1303 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1304 "async event reply: %d", event.b.ae_logpage); 1305 atomic_inc_32(&nvme->n_wrong_logpage); 1306 } 1307 1308 switch (event.b.ae_info) { 1309 case NVME_ASYNC_HEALTH_RELIABILITY: 1310 dev_err(nvme->n_dip, CE_WARN, 1311 "!device reliability compromised"); 1312 /* TODO: send ereport */ 1313 atomic_inc_32(&nvme->n_reliability_event); 1314 break; 1315 1316 case NVME_ASYNC_HEALTH_TEMPERATURE: 1317 dev_err(nvme->n_dip, CE_WARN, 1318 "!temperature above threshold"); 1319 /* TODO: send ereport */ 1320 atomic_inc_32(&nvme->n_temperature_event); 1321 break; 1322 1323 case NVME_ASYNC_HEALTH_SPARE: 1324 dev_err(nvme->n_dip, CE_WARN, 1325 "!spare space below threshold"); 1326 /* TODO: send ereport */ 1327 atomic_inc_32(&nvme->n_spare_event); 1328 break; 1329 } 1330 break; 1331 1332 case NVME_ASYNC_TYPE_VENDOR: 1333 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 1334 "received, info = %x, logpage = %x", event.b.ae_info, 1335 event.b.ae_logpage); 1336 atomic_inc_32(&nvme->n_vendor_event); 1337 break; 1338 1339 default: 1340 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 1341 "type = %x, info = %x, logpage = %x", event.b.ae_type, 1342 event.b.ae_info, event.b.ae_logpage); 1343 atomic_inc_32(&nvme->n_unknown_event); 1344 break; 1345 } 1346 1347 if (error_log) 1348 kmem_free(error_log, sizeof (nvme_error_log_entry_t) * 1349 nvme->n_error_log_len); 1350 1351 if (health_log) 1352 kmem_free(health_log, sizeof (nvme_health_log_t)); 1353 } 1354 1355 static int 1356 nvme_admin_cmd(nvme_cmd_t *cmd, int usec) 1357 { 1358 int ret; 1359 1360 mutex_enter(&cmd->nc_mutex); 1361 ret = nvme_submit_cmd(cmd->nc_nvme->n_adminq, cmd); 1362 1363 if (ret != DDI_SUCCESS) { 1364 mutex_exit(&cmd->nc_mutex); 1365 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1366 "!nvme_submit_cmd failed"); 1367 atomic_inc_32(&cmd->nc_nvme->n_admin_queue_full); 1368 nvme_free_cmd(cmd); 1369 return (DDI_FAILURE); 1370 } 1371 1372 if (nvme_wait_cmd(cmd, usec) == B_FALSE) { 1373 /* 1374 * The command timed out. An abort command was posted that 1375 * will take care of the cleanup. 1376 */ 1377 return (DDI_FAILURE); 1378 } 1379 mutex_exit(&cmd->nc_mutex); 1380 1381 return (DDI_SUCCESS); 1382 } 1383 1384 static int 1385 nvme_async_event(nvme_t *nvme) 1386 { 1387 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1388 int ret; 1389 1390 cmd->nc_sqid = 0; 1391 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 1392 cmd->nc_callback = nvme_async_event_task; 1393 1394 ret = nvme_submit_cmd(nvme->n_adminq, cmd); 1395 1396 if (ret != DDI_SUCCESS) { 1397 dev_err(nvme->n_dip, CE_WARN, 1398 "!nvme_submit_cmd failed for ASYNCHRONOUS EVENT"); 1399 nvme_free_cmd(cmd); 1400 return (DDI_FAILURE); 1401 } 1402 1403 return (DDI_SUCCESS); 1404 } 1405 1406 static void * 1407 nvme_get_logpage(nvme_t *nvme, uint8_t logpage, ...) 
1408 { 1409 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1410 void *buf = NULL; 1411 nvme_getlogpage_t getlogpage; 1412 size_t bufsize; 1413 va_list ap; 1414 1415 va_start(ap, logpage); 1416 1417 cmd->nc_sqid = 0; 1418 cmd->nc_callback = nvme_wakeup_cmd; 1419 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 1420 1421 getlogpage.b.lp_lid = logpage; 1422 1423 switch (logpage) { 1424 case NVME_LOGPAGE_ERROR: 1425 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1426 bufsize = nvme->n_error_log_len * 1427 sizeof (nvme_error_log_entry_t); 1428 break; 1429 1430 case NVME_LOGPAGE_HEALTH: 1431 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 1432 bufsize = sizeof (nvme_health_log_t); 1433 break; 1434 1435 case NVME_LOGPAGE_FWSLOT: 1436 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1437 bufsize = sizeof (nvme_fwslot_log_t); 1438 break; 1439 1440 default: 1441 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d", 1442 logpage); 1443 atomic_inc_32(&nvme->n_unknown_logpage); 1444 goto fail; 1445 } 1446 1447 va_end(ap); 1448 1449 getlogpage.b.lp_numd = bufsize / sizeof (uint32_t) - 1; 1450 1451 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 1452 1453 if (nvme_zalloc_dma(nvme, getlogpage.b.lp_numd * sizeof (uint32_t), 1454 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1455 dev_err(nvme->n_dip, CE_WARN, 1456 "!nvme_zalloc_dma failed for GET LOG PAGE"); 1457 goto fail; 1458 } 1459 1460 if (cmd->nc_dma->nd_ncookie > 2) { 1461 dev_err(nvme->n_dip, CE_WARN, 1462 "!too many DMA cookies for GET LOG PAGE"); 1463 atomic_inc_32(&nvme->n_too_many_cookies); 1464 goto fail; 1465 } 1466 1467 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 1468 if (cmd->nc_dma->nd_ncookie > 1) { 1469 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1470 &cmd->nc_dma->nd_cookie); 1471 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1472 cmd->nc_dma->nd_cookie.dmac_laddress; 1473 } 1474 1475 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1476 dev_err(nvme->n_dip, CE_WARN, 1477 "!nvme_admin_cmd failed for GET LOG PAGE"); 1478 return (NULL); 1479 } 1480 1481 if (nvme_check_cmd_status(cmd)) { 1482 dev_err(nvme->n_dip, CE_WARN, 1483 "!GET LOG PAGE failed with sct = %x, sc = %x", 1484 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1485 goto fail; 1486 } 1487 1488 buf = kmem_alloc(bufsize, KM_SLEEP); 1489 bcopy(cmd->nc_dma->nd_memp, buf, bufsize); 1490 1491 fail: 1492 nvme_free_cmd(cmd); 1493 1494 return (buf); 1495 } 1496 1497 static void * 1498 nvme_identify(nvme_t *nvme, uint32_t nsid) 1499 { 1500 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1501 void *buf = NULL; 1502 1503 cmd->nc_sqid = 0; 1504 cmd->nc_callback = nvme_wakeup_cmd; 1505 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 1506 cmd->nc_sqe.sqe_nsid = nsid; 1507 cmd->nc_sqe.sqe_cdw10 = nsid ? 
NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL; 1508 1509 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 1510 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1511 dev_err(nvme->n_dip, CE_WARN, 1512 "!nvme_zalloc_dma failed for IDENTIFY"); 1513 goto fail; 1514 } 1515 1516 if (cmd->nc_dma->nd_ncookie > 2) { 1517 dev_err(nvme->n_dip, CE_WARN, 1518 "!too many DMA cookies for IDENTIFY"); 1519 atomic_inc_32(&nvme->n_too_many_cookies); 1520 goto fail; 1521 } 1522 1523 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 1524 if (cmd->nc_dma->nd_ncookie > 1) { 1525 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1526 &cmd->nc_dma->nd_cookie); 1527 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1528 cmd->nc_dma->nd_cookie.dmac_laddress; 1529 } 1530 1531 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1532 dev_err(nvme->n_dip, CE_WARN, 1533 "!nvme_admin_cmd failed for IDENTIFY"); 1534 return (NULL); 1535 } 1536 1537 if (nvme_check_cmd_status(cmd)) { 1538 dev_err(nvme->n_dip, CE_WARN, 1539 "!IDENTIFY failed with sct = %x, sc = %x", 1540 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1541 goto fail; 1542 } 1543 1544 buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 1545 bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE); 1546 1547 fail: 1548 nvme_free_cmd(cmd); 1549 1550 return (buf); 1551 } 1552 1553 static int 1554 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues) 1555 { 1556 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1557 nvme_nqueue_t nq = { 0 }; 1558 1559 nq.b.nq_nsq = nq.b.nq_ncq = nqueues; 1560 1561 cmd->nc_sqid = 0; 1562 cmd->nc_callback = nvme_wakeup_cmd; 1563 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 1564 cmd->nc_sqe.sqe_cdw10 = NVME_FEAT_NQUEUES; 1565 cmd->nc_sqe.sqe_cdw11 = nq.r; 1566 1567 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1568 dev_err(nvme->n_dip, CE_WARN, 1569 "!nvme_admin_cmd failed for SET FEATURES (NQUEUES)"); 1570 return (0); 1571 } 1572 1573 if (nvme_check_cmd_status(cmd)) { 1574 dev_err(nvme->n_dip, CE_WARN, 1575 "!SET FEATURES (NQUEUES) failed with sct = %x, sc = %x", 1576 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1577 nvme_free_cmd(cmd); 1578 return (0); 1579 } 1580 1581 nq.r = cmd->nc_cqe.cqe_dw0; 1582 nvme_free_cmd(cmd); 1583 1584 /* 1585 * Always use the same number of submission and completion queues, and 1586 * never use more than the requested number of queues. 
1587 */ 1588 return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq))); 1589 } 1590 1591 static int 1592 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 1593 { 1594 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1595 nvme_create_queue_dw10_t dw10 = { 0 }; 1596 nvme_create_cq_dw11_t c_dw11 = { 0 }; 1597 nvme_create_sq_dw11_t s_dw11 = { 0 }; 1598 1599 dw10.b.q_qid = idx; 1600 dw10.b.q_qsize = qp->nq_nentry - 1; 1601 1602 c_dw11.b.cq_pc = 1; 1603 c_dw11.b.cq_ien = 1; 1604 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt; 1605 1606 cmd->nc_sqid = 0; 1607 cmd->nc_callback = nvme_wakeup_cmd; 1608 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 1609 cmd->nc_sqe.sqe_cdw10 = dw10.r; 1610 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 1611 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress; 1612 1613 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1614 dev_err(nvme->n_dip, CE_WARN, 1615 "!nvme_admin_cmd failed for CREATE CQUEUE"); 1616 return (DDI_FAILURE); 1617 } 1618 1619 if (nvme_check_cmd_status(cmd)) { 1620 dev_err(nvme->n_dip, CE_WARN, 1621 "!CREATE CQUEUE failed with sct = %x, sc = %x", 1622 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1623 nvme_free_cmd(cmd); 1624 return (DDI_FAILURE); 1625 } 1626 1627 nvme_free_cmd(cmd); 1628 1629 s_dw11.b.sq_pc = 1; 1630 s_dw11.b.sq_cqid = idx; 1631 1632 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1633 cmd->nc_sqid = 0; 1634 cmd->nc_callback = nvme_wakeup_cmd; 1635 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 1636 cmd->nc_sqe.sqe_cdw10 = dw10.r; 1637 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 1638 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 1639 1640 if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) { 1641 dev_err(nvme->n_dip, CE_WARN, 1642 "!nvme_admin_cmd failed for CREATE SQUEUE"); 1643 return (DDI_FAILURE); 1644 } 1645 1646 if (nvme_check_cmd_status(cmd)) { 1647 dev_err(nvme->n_dip, CE_WARN, 1648 "!CREATE SQUEUE failed with sct = %x, sc = %x", 1649 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1650 nvme_free_cmd(cmd); 1651 return (DDI_FAILURE); 1652 } 1653 1654 nvme_free_cmd(cmd); 1655 1656 return (DDI_SUCCESS); 1657 } 1658 1659 static boolean_t 1660 nvme_reset(nvme_t *nvme, boolean_t quiesce) 1661 { 1662 nvme_reg_csts_t csts; 1663 int i; 1664 1665 nvme_put32(nvme, NVME_REG_CC, 0); 1666 1667 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1668 if (csts.b.csts_rdy == 1) { 1669 nvme_put32(nvme, NVME_REG_CC, 0); 1670 for (i = 0; i != nvme->n_timeout * 10; i++) { 1671 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1672 if (csts.b.csts_rdy == 0) 1673 break; 1674 1675 if (quiesce) 1676 drv_usecwait(50000); 1677 else 1678 delay(drv_usectohz(50000)); 1679 } 1680 } 1681 1682 nvme_put32(nvme, NVME_REG_AQA, 0); 1683 nvme_put32(nvme, NVME_REG_ASQ, 0); 1684 nvme_put32(nvme, NVME_REG_ACQ, 0); 1685 1686 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1687 return (csts.b.csts_rdy == 0 ? 
B_TRUE : B_FALSE); 1688 } 1689 1690 static void 1691 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 1692 { 1693 nvme_reg_cc_t cc; 1694 nvme_reg_csts_t csts; 1695 int i; 1696 1697 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 1698 1699 cc.r = nvme_get32(nvme, NVME_REG_CC); 1700 cc.b.cc_shn = mode & 0x3; 1701 nvme_put32(nvme, NVME_REG_CC, cc.r); 1702 1703 for (i = 0; i != 10; i++) { 1704 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1705 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 1706 break; 1707 1708 if (quiesce) 1709 drv_usecwait(100000); 1710 else 1711 delay(drv_usectohz(100000)); 1712 } 1713 } 1714 1715 1716 static void 1717 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 1718 { 1719 char model[sizeof (nvme->n_idctl->id_model) + 1]; 1720 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 1721 1722 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 1723 bcopy(nvme->n_idctl->id_serial, serial, 1724 sizeof (nvme->n_idctl->id_serial)); 1725 1726 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 1727 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 1728 1729 (void) snprintf(nvme->n_ns[nsid - 1].ns_devid, 1730 sizeof (nvme->n_ns[0].ns_devid), "%4X-%s-%s-%X", 1731 nvme->n_idctl->id_vid, model, serial, nsid); 1732 } 1733 1734 static int 1735 nvme_init(nvme_t *nvme) 1736 { 1737 nvme_reg_cc_t cc = { 0 }; 1738 nvme_reg_aqa_t aqa = { 0 }; 1739 nvme_reg_asq_t asq = { 0 }; 1740 nvme_reg_acq_t acq = { 0 }; 1741 nvme_reg_cap_t cap; 1742 nvme_reg_vs_t vs; 1743 nvme_reg_csts_t csts; 1744 int i = 0; 1745 int nqueues; 1746 char model[sizeof (nvme->n_idctl->id_model) + 1]; 1747 char *vendor, *product; 1748 1749 /* Setup fixed interrupt for admin queue. */ 1750 if (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 1751 != DDI_SUCCESS) { 1752 dev_err(nvme->n_dip, CE_WARN, 1753 "!failed to setup fixed interrupt"); 1754 goto fail; 1755 } 1756 1757 /* Check controller version */ 1758 vs.r = nvme_get32(nvme, NVME_REG_VS); 1759 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 1760 vs.b.vs_mjr, vs.b.vs_mnr); 1761 1762 if (nvme_version_major < vs.b.vs_mjr || 1763 (nvme_version_major == vs.b.vs_mjr && 1764 nvme_version_minor < vs.b.vs_mnr)) { 1765 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d", 1766 nvme_version_major, nvme_version_minor); 1767 if (nvme->n_strict_version) 1768 goto fail; 1769 } 1770 1771 /* retrieve controller configuration */ 1772 cap.r = nvme_get64(nvme, NVME_REG_CAP); 1773 1774 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 1775 dev_err(nvme->n_dip, CE_WARN, 1776 "!NVM command set not supported by hardware"); 1777 goto fail; 1778 } 1779 1780 nvme->n_nssr_supported = cap.b.cap_nssrs; 1781 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 1782 nvme->n_timeout = cap.b.cap_to; 1783 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 1784 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 1785 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 1786 1787 /* 1788 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 1789 * the base page size of 4k (1<<12), so add 12 here to get the real 1790 * page size value. 1791 */ 1792 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 1793 cap.b.cap_mpsmax + 12); 1794 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 1795 1796 /* 1797 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 
1798 */ 1799 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 1800 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 1801 1802 /* 1803 * Set up PRP DMA to transfer 1 page-aligned page at a time. 1804 * Maxxfer may be increased after we identified the controller limits. 1805 */ 1806 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 1807 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 1808 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 1809 1810 /* 1811 * Reset controller if it's still in ready state. 1812 */ 1813 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 1814 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 1815 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1816 nvme->n_dead = B_TRUE; 1817 goto fail; 1818 } 1819 1820 /* 1821 * Create the admin queue pair. 1822 */ 1823 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 1824 != DDI_SUCCESS) { 1825 dev_err(nvme->n_dip, CE_WARN, 1826 "!unable to allocate admin qpair"); 1827 goto fail; 1828 } 1829 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 1830 nvme->n_ioq[0] = nvme->n_adminq; 1831 1832 nvme->n_progress |= NVME_ADMIN_QUEUE; 1833 1834 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 1835 "admin-queue-len", nvme->n_admin_queue_len); 1836 1837 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 1838 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 1839 acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress; 1840 1841 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 1842 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 1843 1844 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 1845 nvme_put64(nvme, NVME_REG_ASQ, asq); 1846 nvme_put64(nvme, NVME_REG_ACQ, acq); 1847 1848 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 1849 cc.b.cc_css = 0; /* use NVM command set */ 1850 cc.b.cc_mps = nvme->n_pageshift - 12; 1851 cc.b.cc_shn = 0; /* no shutdown in progress */ 1852 cc.b.cc_en = 1; /* enable controller */ 1853 1854 nvme_put32(nvme, NVME_REG_CC, cc.r); 1855 1856 /* 1857 * Wait for the controller to become ready. 1858 */ 1859 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1860 if (csts.b.csts_rdy == 0) { 1861 for (i = 0; i != nvme->n_timeout * 10; i++) { 1862 delay(drv_usectohz(50000)); 1863 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1864 1865 if (csts.b.csts_cfs == 1) { 1866 dev_err(nvme->n_dip, CE_WARN, 1867 "!controller fatal status at init"); 1868 ddi_fm_service_impact(nvme->n_dip, 1869 DDI_SERVICE_LOST); 1870 nvme->n_dead = B_TRUE; 1871 goto fail; 1872 } 1873 1874 if (csts.b.csts_rdy == 1) 1875 break; 1876 } 1877 } 1878 1879 if (csts.b.csts_rdy == 0) { 1880 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 1881 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1882 nvme->n_dead = B_TRUE; 1883 goto fail; 1884 } 1885 1886 /* 1887 * Assume an abort command limit of 1. We'll destroy and re-init 1888 * that later when we know the true abort command limit. 1889 */ 1890 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 1891 1892 /* 1893 * Post an asynchronous event command to catch errors. 
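 * Only a single request is posted here because the controller's limit
 * (id_aerl) is unknown until the Identify Controller data is fetched below;
 * the remaining requests, up to n_async_event_limit, are posted at the end
 * of nvme_init().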
1894 */ 1895 if (nvme_async_event(nvme) != DDI_SUCCESS) { 1896 dev_err(nvme->n_dip, CE_WARN, 1897 "!failed to post async event"); 1898 goto fail; 1899 } 1900 1901 /* 1902 * Identify Controller 1903 */ 1904 nvme->n_idctl = nvme_identify(nvme, 0); 1905 if (nvme->n_idctl == NULL) { 1906 dev_err(nvme->n_dip, CE_WARN, 1907 "!failed to identify controller"); 1908 goto fail; 1909 } 1910 1911 /* 1912 * Get Vendor & Product ID 1913 */ 1914 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 1915 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 1916 sata_split_model(model, &vendor, &product); 1917 1918 if (vendor == NULL) 1919 nvme->n_vendor = strdup("NVMe"); 1920 else 1921 nvme->n_vendor = strdup(vendor); 1922 1923 nvme->n_product = strdup(product); 1924 1925 /* 1926 * Get controller limits. 1927 */ 1928 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 1929 MIN(nvme->n_admin_queue_len / 10, 1930 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 1931 1932 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 1933 "async-event-limit", nvme->n_async_event_limit); 1934 1935 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 1936 1937 /* 1938 * Reinitialize the semaphore with the true abort command limit 1939 * supported by the hardware. It's not necessary to disable interrupts 1940 * as only command aborts use the semaphore, and no commands are 1941 * executed or aborted while we're here. 1942 */ 1943 sema_destroy(&nvme->n_abort_sema); 1944 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 1945 SEMA_DRIVER, NULL); 1946 1947 nvme->n_progress |= NVME_CTRL_LIMITS; 1948 1949 if (nvme->n_idctl->id_mdts == 0) 1950 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 1951 else 1952 nvme->n_max_data_transfer_size = 1953 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 1954 1955 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 1956 1957 /* 1958 * Limit n_max_data_transfer_size to what we can handle in one PRP. 1959 * Chained PRPs are currently unsupported. 1960 * 1961 * This is a no-op on hardware which doesn't support a transfer size 1962 * big enough to require chained PRPs. 1963 */ 1964 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 1965 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 1966 1967 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 1968 1969 /* 1970 * Make sure the minimum/maximum queue entry sizes are not 1971 * larger/smaller than the default. 1972 */ 1973 1974 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 1975 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 1976 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 1977 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 1978 goto fail; 1979 1980 /* 1981 * Check for the presence of a Volatile Write Cache. If present, 1982 * enable it by default. 1983 */ 1984 if (nvme->n_idctl->id_vwc.vwc_present == 0) { 1985 nvme->n_volatile_write_cache_enabled = B_FALSE; 1986 nvme_bd_ops.o_sync_cache = NULL; 1987 } else { 1988 /* 1989 * TODO: send SET FEATURES to enable VWC 1990 * (have no hardware to test this) 1991 */ 1992 nvme->n_volatile_write_cache_enabled = B_FALSE; 1993 nvme_bd_ops.o_sync_cache = NULL; 1994 } 1995 1996 /* 1997 * Grab a copy of all mandatory log pages. 
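 * These are the error information, SMART / health information, and firmware
 * slot information pages retrieved via nvme_get_logpage() below.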
1998 * 1999 * TODO: should go away once user space tool exists to print logs 2000 */ 2001 nvme->n_error_log = (nvme_error_log_entry_t *) 2002 nvme_get_logpage(nvme, NVME_LOGPAGE_ERROR); 2003 nvme->n_health_log = (nvme_health_log_t *) 2004 nvme_get_logpage(nvme, NVME_LOGPAGE_HEALTH, -1); 2005 nvme->n_fwslot_log = (nvme_fwslot_log_t *) 2006 nvme_get_logpage(nvme, NVME_LOGPAGE_FWSLOT); 2007 2008 /* 2009 * Identify Namespaces 2010 */ 2011 nvme->n_namespace_count = nvme->n_idctl->id_nn; 2012 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 2013 nvme->n_namespace_count, KM_SLEEP); 2014 2015 for (i = 0; i != nvme->n_namespace_count; i++) { 2016 nvme_identify_nsid_t *idns; 2017 int last_rp; 2018 2019 nvme->n_ns[i].ns_nvme = nvme; 2020 nvme->n_ns[i].ns_idns = idns = nvme_identify(nvme, i + 1); 2021 2022 if (idns == NULL) { 2023 dev_err(nvme->n_dip, CE_WARN, 2024 "!failed to identify namespace %d", i + 1); 2025 goto fail; 2026 } 2027 2028 nvme->n_ns[i].ns_id = i + 1; 2029 nvme->n_ns[i].ns_block_count = idns->id_nsize; 2030 nvme->n_ns[i].ns_block_size = 2031 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 2032 nvme->n_ns[i].ns_best_block_size = nvme->n_ns[i].ns_block_size; 2033 2034 nvme_prepare_devid(nvme, nvme->n_ns[i].ns_id); 2035 2036 /* 2037 * Find the LBA format with no metadata and the best relative 2038 * performance. A value of 3 means "degraded", 0 is best. 2039 */ 2040 last_rp = 3; 2041 for (int j = 0; j != idns->id_nlbaf; j++) { 2042 if (idns->id_lbaf[j].lbaf_lbads == 0) 2043 break; 2044 if (idns->id_lbaf[j].lbaf_ms != 0) 2045 continue; 2046 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 2047 continue; 2048 last_rp = idns->id_lbaf[j].lbaf_rp; 2049 nvme->n_ns[i].ns_best_block_size = 2050 1 << idns->id_lbaf[j].lbaf_lbads; 2051 } 2052 2053 /* 2054 * We currently don't support namespaces that use either: 2055 * - thin provisioning 2056 * - extended LBAs 2057 * - protection information 2058 */ 2059 if (idns->id_nsfeat.f_thin || 2060 idns->id_flbas.lba_extlba || 2061 idns->id_dps.dp_pinfo) { 2062 dev_err(nvme->n_dip, CE_WARN, 2063 "!ignoring namespace %d, unsupported features: " 2064 "thin = %d, extlba = %d, pinfo = %d", i + 1, 2065 idns->id_nsfeat.f_thin, idns->id_flbas.lba_extlba, 2066 idns->id_dps.dp_pinfo); 2067 nvme->n_ns[i].ns_ignore = B_TRUE; 2068 } 2069 } 2070 2071 /* 2072 * Try to set up MSI/MSI-X interrupts. 2073 */ 2074 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 2075 != 0) { 2076 nvme_release_interrupts(nvme); 2077 2078 nqueues = MIN(UINT16_MAX, ncpus); 2079 2080 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 2081 nqueues) != DDI_SUCCESS) && 2082 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 2083 nqueues) != DDI_SUCCESS)) { 2084 dev_err(nvme->n_dip, CE_WARN, 2085 "!failed to setup MSI/MSI-X interrupts"); 2086 goto fail; 2087 } 2088 } 2089 2090 nqueues = nvme->n_intr_cnt; 2091 2092 /* 2093 * Create I/O queue pairs. 2094 */ 2095 nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues); 2096 if (nvme->n_ioq_count == 0) { 2097 dev_err(nvme->n_dip, CE_WARN, 2098 "!failed to set number of I/O queues to %d", nqueues); 2099 goto fail; 2100 } 2101 2102 /* 2103 * Reallocate I/O queue array 2104 */ 2105 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 2106 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 2107 (nvme->n_ioq_count + 1), KM_SLEEP); 2108 nvme->n_ioq[0] = nvme->n_adminq; 2109 2110 /* 2111 * If we got less queues than we asked for we might as well give 2112 * some of the interrupt vectors back to the system. 
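 * n_ioq_count as granted by nvme_set_nqueues() above may be smaller than the
 * number of MSI/MSI-X vectors that were allocated, so the interrupts are
 * released and set up again below.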
2113 */ 2114 if (nvme->n_ioq_count < nqueues) { 2115 nvme_release_interrupts(nvme); 2116 2117 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, nqueues) 2118 != DDI_SUCCESS) { 2119 dev_err(nvme->n_dip, CE_WARN, 2120 "!failed to reduce number of interrupts"); 2121 goto fail; 2122 } 2123 } 2124 2125 /* 2126 * Alloc & register I/O queue pairs 2127 */ 2128 nvme->n_io_queue_len = 2129 MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries); 2130 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len", 2131 nvme->n_io_queue_len); 2132 2133 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 2134 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len, 2135 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 2136 dev_err(nvme->n_dip, CE_WARN, 2137 "!unable to allocate I/O qpair %d", i); 2138 goto fail; 2139 } 2140 2141 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) 2142 != DDI_SUCCESS) { 2143 dev_err(nvme->n_dip, CE_WARN, 2144 "!unable to create I/O qpair %d", i); 2145 goto fail; 2146 } 2147 } 2148 2149 /* 2150 * Post more asynchronous events commands to reduce event reporting 2151 * latency as suggested by the spec. 2152 */ 2153 for (i = 1; i != nvme->n_async_event_limit; i++) { 2154 if (nvme_async_event(nvme) != DDI_SUCCESS) { 2155 dev_err(nvme->n_dip, CE_WARN, 2156 "!failed to post async event %d", i); 2157 goto fail; 2158 } 2159 } 2160 2161 return (DDI_SUCCESS); 2162 2163 fail: 2164 (void) nvme_reset(nvme, B_FALSE); 2165 return (DDI_FAILURE); 2166 } 2167 2168 static uint_t 2169 nvme_intr(caddr_t arg1, caddr_t arg2) 2170 { 2171 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 2172 nvme_t *nvme = (nvme_t *)arg1; 2173 int inum = (int)(uintptr_t)arg2; 2174 int qnum; 2175 nvme_cmd_t *cmd; 2176 2177 if (inum >= nvme->n_intr_cnt) 2178 return (DDI_INTR_UNCLAIMED); 2179 2180 /* 2181 * The interrupt vector a queue uses is calculated as queue_idx % 2182 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 2183 * in steps of n_intr_cnt to process all queues using this vector. 
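 * For example, with n_intr_cnt == 4 and n_ioq_count == 4, vector 0 services
 * the admin queue (index 0) and I/O queue 4, while vectors 1 through 3 each
 * service a single I/O queue.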
2184 */ 2185 for (qnum = inum; 2186 qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL; 2187 qnum += nvme->n_intr_cnt) { 2188 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) { 2189 taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq, 2190 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 2191 } 2192 } 2193 2194 return (DDI_INTR_CLAIMED); 2195 } 2196 2197 static void 2198 nvme_release_interrupts(nvme_t *nvme) 2199 { 2200 int i; 2201 2202 for (i = 0; i < nvme->n_intr_cnt; i++) { 2203 if (nvme->n_inth[i] == NULL) 2204 break; 2205 2206 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 2207 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 2208 else 2209 (void) ddi_intr_disable(nvme->n_inth[i]); 2210 2211 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 2212 (void) ddi_intr_free(nvme->n_inth[i]); 2213 } 2214 2215 kmem_free(nvme->n_inth, nvme->n_inth_sz); 2216 nvme->n_inth = NULL; 2217 nvme->n_inth_sz = 0; 2218 2219 nvme->n_progress &= ~NVME_INTERRUPTS; 2220 } 2221 2222 static int 2223 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 2224 { 2225 int failed = 0; 2226 int nintrs, navail, count; 2227 int ret; 2228 int i; 2229 2230 if (nvme->n_intr_types == 0) { 2231 ret = ddi_intr_get_supported_types(nvme->n_dip, 2232 &nvme->n_intr_types); 2233 if (ret != DDI_SUCCESS) { 2234 dev_err(nvme->n_dip, CE_WARN, 2235 "!%s: ddi_intr_get_supported types failed", 2236 __func__); 2237 return (ret); 2238 } 2239 } 2240 2241 if ((nvme->n_intr_types & intr_type) == 0) 2242 return (DDI_FAILURE); 2243 2244 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 2245 if (ret != DDI_SUCCESS) { 2246 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 2247 __func__); 2248 return (ret); 2249 } 2250 2251 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 2252 if (ret != DDI_SUCCESS) { 2253 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 2254 __func__); 2255 return (ret); 2256 } 2257 2258 /* We want at most one interrupt per queue pair. 
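 * navail is clamped to nqpairs below. ddi_intr_alloc() may still return
 * fewer handles than requested; in that case n_intr_cnt ends up smaller and
 * nvme_init() will create correspondingly fewer I/O queue pairs.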
*/ 2259 if (navail > nqpairs) 2260 navail = nqpairs; 2261 2262 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 2263 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 2264 2265 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 2266 &count, 0); 2267 if (ret != DDI_SUCCESS) { 2268 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 2269 __func__); 2270 goto fail; 2271 } 2272 2273 nvme->n_intr_cnt = count; 2274 2275 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 2276 if (ret != DDI_SUCCESS) { 2277 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 2278 __func__); 2279 goto fail; 2280 } 2281 2282 for (i = 0; i < count; i++) { 2283 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 2284 (void *)nvme, (void *)(uintptr_t)i); 2285 if (ret != DDI_SUCCESS) { 2286 dev_err(nvme->n_dip, CE_WARN, 2287 "!%s: ddi_intr_add_handler failed", __func__); 2288 goto fail; 2289 } 2290 } 2291 2292 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 2293 2294 for (i = 0; i < count; i++) { 2295 if (nvme->n_inth[i] == NULL) 2296 break; 2297 2298 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) { 2299 if (ddi_intr_block_enable(&nvme->n_inth[i], 1) != 2300 DDI_SUCCESS) 2301 failed++; 2302 } else { 2303 if (ddi_intr_enable(nvme->n_inth[i]) != DDI_SUCCESS) 2304 failed++; 2305 } 2306 } 2307 2308 if (failed != 0) { 2309 dev_err(nvme->n_dip, CE_WARN, 2310 "!%s: enabling interrupts failed", __func__); 2311 goto fail; 2312 } 2313 2314 nvme->n_intr_type = intr_type; 2315 2316 nvme->n_progress |= NVME_INTERRUPTS; 2317 2318 return (DDI_SUCCESS); 2319 2320 fail: 2321 nvme_release_interrupts(nvme); 2322 2323 return (ret); 2324 } 2325 2326 static int 2327 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 2328 { 2329 _NOTE(ARGUNUSED(arg)); 2330 2331 pci_ereport_post(dip, fm_error, NULL); 2332 return (fm_error->fme_status); 2333 } 2334 2335 static int 2336 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2337 { 2338 nvme_t *nvme; 2339 int instance; 2340 int nregs; 2341 off_t regsize; 2342 int i; 2343 char name[32]; 2344 2345 if (cmd != DDI_ATTACH) 2346 return (DDI_FAILURE); 2347 2348 instance = ddi_get_instance(dip); 2349 2350 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 2351 return (DDI_FAILURE); 2352 2353 nvme = ddi_get_soft_state(nvme_state, instance); 2354 ddi_set_driver_private(dip, nvme); 2355 nvme->n_dip = dip; 2356 2357 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2358 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 2359 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 2360 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 
2361 B_TRUE : B_FALSE; 2362 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2363 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 2364 nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2365 DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN); 2366 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2367 DDI_PROP_DONTPASS, "async-event-limit", 2368 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 2369 2370 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 2371 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 2372 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 2373 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 2374 2375 if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN) 2376 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN; 2377 2378 if (nvme->n_async_event_limit < 1) 2379 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 2380 2381 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 2382 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 2383 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 2384 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 2385 2386 /* 2387 * Setup FMA support. 2388 */ 2389 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 2390 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 2391 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 2392 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 2393 2394 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 2395 2396 if (nvme->n_fm_cap) { 2397 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 2398 nvme->n_reg_acc_attr.devacc_attr_access = 2399 DDI_FLAGERR_ACC; 2400 2401 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 2402 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2403 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2404 } 2405 2406 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 2407 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2408 pci_ereport_setup(dip); 2409 2410 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2411 ddi_fm_handler_register(dip, nvme_fm_errcb, 2412 (void *)nvme); 2413 } 2414 2415 nvme->n_progress |= NVME_FMA_INIT; 2416 2417 /* 2418 * The spec defines several register sets. Only the controller 2419 * registers (set 1) are currently used. 2420 */ 2421 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 2422 nregs < 2 || 2423 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 2424 goto fail; 2425 2426 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 2427 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 2428 dev_err(dip, CE_WARN, "!failed to map regset 1"); 2429 goto fail; 2430 } 2431 2432 nvme->n_progress |= NVME_REGS_MAPPED; 2433 2434 /* 2435 * Create taskq for command completion. 2436 */ 2437 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq", 2438 ddi_driver_name(dip), ddi_get_instance(dip)); 2439 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus), 2440 TASKQ_DEFAULTPRI, 0); 2441 if (nvme->n_cmd_taskq == NULL) { 2442 dev_err(dip, CE_WARN, "!failed to create cmd taskq"); 2443 goto fail; 2444 } 2445 2446 2447 if (nvme_init(nvme) != DDI_SUCCESS) 2448 goto fail; 2449 2450 /* 2451 * Attach the blkdev driver for each namespace. 
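 * Namespaces flagged ns_ignore by nvme_init() (unsupported features) are
 * skipped here and get no blkdev instance.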
2452 */ 2453 for (i = 0; i != nvme->n_namespace_count; i++) { 2454 if (nvme->n_ns[i].ns_ignore) 2455 continue; 2456 2457 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 2458 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); 2459 2460 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 2461 dev_err(dip, CE_WARN, 2462 "!failed to get blkdev handle for namespace %d", i); 2463 goto fail; 2464 } 2465 2466 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 2467 != DDI_SUCCESS) { 2468 dev_err(dip, CE_WARN, 2469 "!failed to attach blkdev handle for namespace %d", 2470 i); 2471 goto fail; 2472 } 2473 } 2474 2475 return (DDI_SUCCESS); 2476 2477 fail: 2478 /* attach successful anyway so that FMA can retire the device */ 2479 if (nvme->n_dead) 2480 return (DDI_SUCCESS); 2481 2482 (void) nvme_detach(dip, DDI_DETACH); 2483 2484 return (DDI_FAILURE); 2485 } 2486 2487 static int 2488 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2489 { 2490 int instance, i; 2491 nvme_t *nvme; 2492 2493 if (cmd != DDI_DETACH) 2494 return (DDI_FAILURE); 2495 2496 instance = ddi_get_instance(dip); 2497 2498 nvme = ddi_get_soft_state(nvme_state, instance); 2499 2500 if (nvme == NULL) 2501 return (DDI_FAILURE); 2502 2503 if (nvme->n_ns) { 2504 for (i = 0; i != nvme->n_namespace_count; i++) { 2505 if (nvme->n_ns[i].ns_bd_hdl) { 2506 (void) bd_detach_handle( 2507 nvme->n_ns[i].ns_bd_hdl); 2508 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 2509 } 2510 2511 if (nvme->n_ns[i].ns_idns) 2512 kmem_free(nvme->n_ns[i].ns_idns, 2513 sizeof (nvme_identify_nsid_t)); 2514 } 2515 2516 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 2517 nvme->n_namespace_count); 2518 } 2519 2520 if (nvme->n_progress & NVME_INTERRUPTS) 2521 nvme_release_interrupts(nvme); 2522 2523 if (nvme->n_cmd_taskq) 2524 ddi_taskq_wait(nvme->n_cmd_taskq); 2525 2526 if (nvme->n_ioq_count > 0) { 2527 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 2528 if (nvme->n_ioq[i] != NULL) { 2529 /* TODO: send destroy queue commands */ 2530 nvme_free_qpair(nvme->n_ioq[i]); 2531 } 2532 } 2533 2534 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 2535 (nvme->n_ioq_count + 1)); 2536 } 2537 2538 if (nvme->n_progress & NVME_REGS_MAPPED) { 2539 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 2540 (void) nvme_reset(nvme, B_FALSE); 2541 } 2542 2543 if (nvme->n_cmd_taskq) 2544 ddi_taskq_destroy(nvme->n_cmd_taskq); 2545 2546 if (nvme->n_progress & NVME_CTRL_LIMITS) 2547 sema_destroy(&nvme->n_abort_sema); 2548 2549 if (nvme->n_progress & NVME_ADMIN_QUEUE) 2550 nvme_free_qpair(nvme->n_adminq); 2551 2552 if (nvme->n_idctl) 2553 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t)); 2554 2555 if (nvme->n_progress & NVME_REGS_MAPPED) 2556 ddi_regs_map_free(&nvme->n_regh); 2557 2558 if (nvme->n_progress & NVME_FMA_INIT) { 2559 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2560 ddi_fm_handler_unregister(nvme->n_dip); 2561 2562 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 2563 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2564 pci_ereport_teardown(nvme->n_dip); 2565 2566 ddi_fm_fini(nvme->n_dip); 2567 } 2568 2569 if (nvme->n_vendor != NULL) 2570 strfree(nvme->n_vendor); 2571 2572 if (nvme->n_product != NULL) 2573 strfree(nvme->n_product); 2574 2575 ddi_soft_state_free(nvme_state, instance); 2576 2577 return (DDI_SUCCESS); 2578 } 2579 2580 static int 2581 nvme_quiesce(dev_info_t *dip) 2582 { 2583 int instance; 2584 nvme_t *nvme; 2585 2586 instance = ddi_get_instance(dip); 2587 2588 nvme = ddi_get_soft_state(nvme_state, instance); 2589 2590 if (nvme == NULL) 2591 return (DDI_FAILURE); 2592 2593 nvme_shutdown(nvme, 
NVME_CC_SHN_ABRUPT, B_TRUE); 2594 2595 (void) nvme_reset(nvme, B_TRUE); 2596 2597 return (DDI_FAILURE); 2598 } 2599 2600 static int 2601 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer) 2602 { 2603 nvme_t *nvme = cmd->nc_nvme; 2604 int nprp_page, nprp; 2605 uint64_t *prp; 2606 2607 if (xfer->x_ndmac == 0) 2608 return (DDI_FAILURE); 2609 2610 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress; 2611 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 2612 2613 if (xfer->x_ndmac == 1) { 2614 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 2615 return (DDI_SUCCESS); 2616 } else if (xfer->x_ndmac == 2) { 2617 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress; 2618 return (DDI_SUCCESS); 2619 } 2620 2621 xfer->x_ndmac--; 2622 2623 nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1; 2624 ASSERT(nprp_page > 0); 2625 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page; 2626 2627 /* 2628 * We currently don't support chained PRPs and set up our DMA 2629 * attributes to reflect that. If we still get an I/O request 2630 * that needs a chained PRP something is very wrong. 2631 */ 2632 VERIFY(nprp == 1); 2633 2634 if (nvme_zalloc_dma(nvme, nvme->n_pagesize * nprp, DDI_DMA_READ, 2635 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2636 dev_err(nvme->n_dip, CE_WARN, "!%s: nvme_zalloc_dma failed", 2637 __func__); 2638 return (DDI_FAILURE); 2639 } 2640 2641 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress; 2642 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, &cmd->nc_dma->nd_cookie); 2643 2644 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 2645 for (prp = (uint64_t *)cmd->nc_dma->nd_memp; 2646 xfer->x_ndmac > 0; 2647 prp++, xfer->x_ndmac--) { 2648 *prp = xfer->x_dmac.dmac_laddress; 2649 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 2650 } 2651 2652 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len, 2653 DDI_DMA_SYNC_FORDEV); 2654 return (DDI_SUCCESS); 2655 } 2656 2657 static nvme_cmd_t * 2658 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 2659 { 2660 nvme_t *nvme = ns->ns_nvme; 2661 nvme_cmd_t *cmd; 2662 2663 /* 2664 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 2665 */ 2666 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ? 
2667 KM_NOSLEEP : KM_SLEEP); 2668 2669 if (cmd == NULL) 2670 return (NULL); 2671 2672 cmd->nc_sqe.sqe_opc = opc; 2673 cmd->nc_callback = nvme_bd_xfer_done; 2674 cmd->nc_xfer = xfer; 2675 2676 switch (opc) { 2677 case NVME_OPC_NVM_WRITE: 2678 case NVME_OPC_NVM_READ: 2679 VERIFY(xfer->x_nblks <= 0x10000); 2680 2681 cmd->nc_sqe.sqe_nsid = ns->ns_id; 2682 2683 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 2684 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 2685 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 2686 2687 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS) 2688 goto fail; 2689 break; 2690 2691 case NVME_OPC_NVM_FLUSH: 2692 cmd->nc_sqe.sqe_nsid = ns->ns_id; 2693 break; 2694 2695 default: 2696 goto fail; 2697 } 2698 2699 return (cmd); 2700 2701 fail: 2702 nvme_free_cmd(cmd); 2703 return (NULL); 2704 } 2705 2706 static void 2707 nvme_bd_xfer_done(void *arg) 2708 { 2709 nvme_cmd_t *cmd = arg; 2710 bd_xfer_t *xfer = cmd->nc_xfer; 2711 int error = 0; 2712 2713 error = nvme_check_cmd_status(cmd); 2714 nvme_free_cmd(cmd); 2715 2716 bd_xfer_done(xfer, error); 2717 } 2718 2719 static void 2720 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 2721 { 2722 nvme_namespace_t *ns = arg; 2723 nvme_t *nvme = ns->ns_nvme; 2724 2725 /* 2726 * blkdev maintains one queue size per instance (namespace), 2727 * but all namespace share the I/O queues. 2728 * TODO: need to figure out a sane default, or use per-NS I/O queues, 2729 * or change blkdev to handle EAGAIN 2730 */ 2731 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len 2732 / nvme->n_namespace_count; 2733 2734 /* 2735 * d_maxxfer is not set, which means the value is taken from the DMA 2736 * attributes specified to bd_alloc_handle. 2737 */ 2738 2739 drive->d_removable = B_FALSE; 2740 drive->d_hotpluggable = B_FALSE; 2741 2742 drive->d_target = ns->ns_id; 2743 drive->d_lun = 0; 2744 2745 drive->d_model = nvme->n_idctl->id_model; 2746 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 2747 drive->d_vendor = nvme->n_vendor; 2748 drive->d_vendor_len = strlen(nvme->n_vendor); 2749 drive->d_product = nvme->n_product; 2750 drive->d_product_len = strlen(nvme->n_product); 2751 drive->d_serial = nvme->n_idctl->id_serial; 2752 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 2753 drive->d_revision = nvme->n_idctl->id_fwrev; 2754 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 2755 } 2756 2757 static int 2758 nvme_bd_mediainfo(void *arg, bd_media_t *media) 2759 { 2760 nvme_namespace_t *ns = arg; 2761 2762 media->m_nblks = ns->ns_block_count; 2763 media->m_blksize = ns->ns_block_size; 2764 media->m_readonly = B_FALSE; 2765 media->m_solidstate = B_TRUE; 2766 2767 media->m_pblksize = ns->ns_best_block_size; 2768 2769 return (0); 2770 } 2771 2772 static int 2773 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 2774 { 2775 nvme_t *nvme = ns->ns_nvme; 2776 nvme_cmd_t *cmd; 2777 2778 if (nvme->n_dead) 2779 return (EIO); 2780 2781 /* No polling for now */ 2782 if (xfer->x_flags & BD_XFER_POLL) 2783 return (EIO); 2784 2785 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 2786 if (cmd == NULL) 2787 return (ENOMEM); 2788 2789 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; 2790 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 2791 2792 if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd) 2793 != DDI_SUCCESS) 2794 return (EAGAIN); 2795 2796 return (0); 2797 } 2798 2799 static int 2800 nvme_bd_read(void *arg, bd_xfer_t *xfer) 2801 { 2802 nvme_namespace_t *ns = arg; 2803 2804 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 2805 } 
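/*
 * Two notes on the I/O path above:
 *
 * nvme_bd_cmd() selects the submission queue as (CPU->cpu_id % n_ioq_count)
 * + 1; e.g. with 4 I/O queue pairs a thread running on CPU 6 submits to I/O
 * queue 3. Index 0 is the admin queue, hence the + 1.
 *
 * For nvme_fill_prp(), assuming the default 4k page size: a transfer made up
 * of 5 DMA cookies stores the first cookie in PRP entry 1 of the SQE,
 * allocates one PRP list page, points PRP entry 2 at it, and copies the
 * remaining 4 cookie addresses into that list. A 4k list holds 511 entries
 * (nprp_page), so together with the cookie in the SQE up to 512 page-sized
 * cookies, i.e. the 2 MB n_max_data_transfer_size cap from nvme_init(), fit
 * without chained PRP lists.
 */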
2806 2807 static int 2808 nvme_bd_write(void *arg, bd_xfer_t *xfer) 2809 { 2810 nvme_namespace_t *ns = arg; 2811 2812 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 2813 } 2814 2815 static int 2816 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 2817 { 2818 nvme_namespace_t *ns = arg; 2819 2820 if (ns->ns_nvme->n_dead) 2821 return (EIO); 2822 2823 /* 2824 * If the volatile write cache isn't enabled the FLUSH command is a 2825 * no-op, so we can take a shortcut here. 2826 */ 2827 if (ns->ns_nvme->n_volatile_write_cache_enabled == B_FALSE) { 2828 bd_xfer_done(xfer, ENOTSUP); 2829 return (0); 2830 } 2831 2832 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 2833 } 2834 2835 static int 2836 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 2837 { 2838 nvme_namespace_t *ns = arg; 2839 2840 return (ddi_devid_init(devinfo, DEVID_ENCAP, strlen(ns->ns_devid), 2841 ns->ns_devid, devid)); 2842 } 2843
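/*
 * For reference, the tunables read by nvme_attach() could be set in a driver
 * configuration file (e.g. nvme.conf) like this; the property names match
 * the ddi_prop_get_int() calls above, the values are purely illustrative:
 *
 *	strict-version=1;
 *	ignore-unknown-vendor-status=0;
 *	admin-queue-len=256;
 *	io-queue-len=1024;
 *	async-event-limit=10;
 */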