1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved. 14 * Copyright 2019 Unix Software Ltd. 15 * Copyright 2020 Joyent, Inc. 16 * Copyright 2020 Racktop Systems. 17 * Copyright 2023 Oxide Computer Company. 18 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association. 19 * Copyright 2022 Tintri by DDN, Inc. All rights reserved. 20 */ 21 22 /* 23 * blkdev driver for NVMe compliant storage devices 24 * 25 * This driver targets and is designed to support all NVMe 1.x devices. 26 * Features are added to the driver as we encounter devices that require them 27 * and as our needs dictate, so some commands or log pages may not take advantage of newer 28 * features that devices support at this time. When you encounter such a case, 29 * it is generally fine to add that support to the driver as long as you take 30 * care to ensure that the requisite device version is met before using it. 31 * 32 * The driver has only been tested on x86 systems and will not work on big- 33 * endian systems without changes to the code accessing registers and data 34 * structures used by the hardware. 35 * 36 * 37 * Interrupt Usage: 38 * 39 * The driver will use a single interrupt while configuring the device as the 40 * specification requires, but contrary to the specification it will try to use 41 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it 42 * will switch to multiple-message MSI(-X) if supported. The driver wants to 43 * have one interrupt vector per CPU, but it will work correctly if fewer are 44 * available. Interrupts can be shared by queues; the interrupt handler will 45 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only 46 * the admin queue will share an interrupt with one I/O queue. The interrupt 47 * handler will retrieve completed commands from all queues sharing an interrupt 48 * vector and will post them to a taskq for completion processing. 49 * 50 * 51 * Command Processing: 52 * 53 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up 54 * to 65536 I/O commands. The driver will configure one I/O queue pair per 55 * available interrupt vector, with the queue length usually much smaller than 56 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer 57 * interrupt vectors will be used. 58 * 59 * Additionally the hardware provides a single special admin queue pair that can 60 * hold up to 4096 admin commands. 61 * 62 * From the hardware perspective both queues of a queue pair are independent, 63 * but they share some driver state: the command array (holding pointers to 64 * commands currently being processed by the hardware) and the active command 65 * counter. Access to a submission queue and the shared state is protected by 66 * nq_mutex; the completion queue is protected by ncq_mutex. 67 * 68 * When a command is submitted to a queue pair the active command counter is 69 * incremented and a pointer to the command is stored in the command array. The 70 * array index is used as the command identifier (CID) in the submission queue 71 * entry.
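 *
 * As a simplified sketch of the bookkeeping just described (the authoritative
 * logic lives in nvme_submit_cmd_common() further down in this file; the loop
 * covers the slot-reuse case discussed below), command submission amounts to:
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	qp->nq_active_cmds++;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
 *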
Some commands may take a very long time to complete, and if the queue 72 * wraps around in that time a submission may find the next array slot to still 73 * be used by a long-running command. In this case the array is sequentially 74 * searched for the next free slot. The length of the command array is the same 75 * as the configured queue length. Queue overrun is prevented by the semaphore, 76 * so a command submission may block if the queue is full. 77 * 78 * 79 * Polled I/O Support: 80 * 81 * For kernel core dump support the driver can do polled I/O. As interrupts are 82 * turned off while dumping, the driver will just submit a command in the regular 83 * way, and then repeatedly attempt a command retrieval until it gets the 84 * command back. 85 * 86 * 87 * Namespace Support: 88 * 89 * NVMe devices can have multiple namespaces, each being an independent data 90 * store. The driver supports multiple namespaces and creates a blkdev interface 91 * for each namespace found. Namespaces can have various attributes to support 92 * protection information. This driver does not support any of these attributes and ignores 93 * namespaces that have them. 94 * 95 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier 96 * (EUI64), and NVMe 1.2 introduced an additional 128bit Namespace Globally 97 * Unique Identifier (NGUID). This driver uses either the NGUID or the EUI64 98 * if present to generate the devid, and passes the EUI64 to blkdev to use it 99 * in the device node names. 100 * 101 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a 102 * single controller. This is an artificial limit imposed by the driver to be 103 * able to address a reasonable number of controllers and namespaces using a 104 * 32bit minor node number. 105 * 106 * 107 * Minor nodes: 108 * 109 * For each NVMe device the driver exposes one minor node for the controller and 110 * one minor node for each namespace. The only operations supported by those 111 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the 112 * interface for the nvmeadm(8) utility. 113 * 114 * Exclusive opens are required for certain ioctl(9E) operations that alter 115 * controller and/or namespace state. While different namespaces may be opened 116 * exclusively in parallel, an exclusive open of the controller minor node 117 * requires that no namespaces are currently open (exclusive or otherwise). 118 * Opening any namespace minor node (exclusive or otherwise) will fail while 119 * the controller minor node is opened exclusively by any other thread. Thus it 120 * is possible for one thread at a time to open the controller minor node 121 * exclusively, and keep it open while opening any namespace minor node of the 122 * same controller, exclusively or otherwise. 123 * 124 * 125 * 126 * Blkdev Interface: 127 * 128 * This driver uses blkdev to do all the heavy lifting involved with presenting 129 * a disk device to the system. As a result, the processing of I/O requests is 130 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA 131 * setup, and splitting of transfers into manageable chunks. 132 * 133 * I/O requests coming in from blkdev are turned into NVM commands and posted to 134 * an I/O queue. The queue is selected by taking the CPU id modulo the number of 135 * queues. There is currently no timeout handling of I/O commands. 136 * 137 * Blkdev also supports querying device/media information and generating a 138 * devid.
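 *
 * As a purely illustrative example of the CPU-id-modulo rule above: with eight
 * I/O queues configured, an I/O request issued from CPU 13 would be posted to
 * I/O queue 13 % 8 = 5 (the admin queue is not used for blkdev I/O).
 *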
The driver reports the best block size as determined by the namespace 139 * format back to blkdev as physical block size to support partition and block 140 * alignment. The devid is based on either the namespace GUID or EUI64, if 141 * present, or composed using the device vendor ID, model number, serial number, 142 * and the namespace ID. 143 * 144 * 145 * Error Handling: 146 * 147 * Error handling is currently limited to detecting fatal hardware errors, 148 * either by asynchronous events, or synchronously through command status or 149 * admin command timeouts. In case of severe errors the device is fenced off and 150 * all further requests will return EIO. FMA is then called to fault the device. 151 * 152 * The hardware has a limit for outstanding asynchronous event requests. Before 153 * this limit is known the driver assumes it is at least 1 and posts a single 154 * asynchronous request. Later when the limit is known more asynchronous event 155 * requests are posted to allow quicker reception of error information. When an 156 * asynchronous event is posted by the hardware the driver will parse the error 157 * status fields and log information or fault the device, depending on the 158 * severity of the asynchronous event. The asynchronous event request is then 159 * reused and posted to the admin queue again. 160 * 161 * On command completion the command status is checked for errors. In case of 162 * errors indicating a driver bug the driver panics. Almost all other error 163 * status values just cause EIO to be returned. 164 * 165 * Command timeouts are currently detected for all admin commands except 166 * asynchronous event requests. If a command times out and the hardware appears 167 * to be healthy the driver attempts to abort the command. The original command 168 * timeout is also applied to the abort command. If the abort times out too, the 169 * driver assumes the device to be dead, fences it off, and calls FMA to retire 170 * it. In all other cases the aborted command should return immediately with a 171 * status indicating it was aborted, and the driver will wait indefinitely for 172 * that to happen. No timeout handling of normal I/O commands is presently done. 173 * 174 * Any command that times out due to the controller dropping dead will be put on 175 * the nvme_lost_cmds list if it references DMA memory. This prevents the DMA 176 * memory from being reused by the system and later written to by a "dead" NVMe 177 * controller. 178 * 179 * 180 * Locking: 181 * 182 * Each queue pair has an nq_mutex and ncq_mutex. The nq_mutex must be held 183 * when accessing shared state and submission queue registers; ncq_mutex 184 * is held when accessing completion queue state and registers. 185 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while 186 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both 187 * mutexes themselves. 188 * 189 * Each command also has its own nc_mutex, which is associated with the 190 * condition variable nc_cv. It is only used on admin commands which are run 191 * synchronously. In that case it must be held across calls to 192 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by 193 * nvme_admin_cmd(). It must also be held whenever the completion state of the 194 * command is changed or while an admin command timeout is handled. 195 * 196 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first. 197 * More than one nc_mutex may only be held when aborting commands.
In this case, 198 * the nc_mutex of the command to be aborted must be held across the call to 199 * nvme_abort_cmd() to prevent the command from completing while the abort is in 200 * progress. 201 * 202 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be 203 * acquired first. More than one nq_mutex is never held by a single thread. 204 * The ncq_mutex is only held by nvme_retrieve_cmd() and 205 * nvme_process_iocq(). nvme_process_iocq() is only called from the 206 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the 207 * mutex is non-contentious but is required for implementation completeness 208 * and safety. 209 * 210 * There is one mutex n_minor_mutex which protects all open flags nm_open and 211 * exclusive-open thread pointers nm_oexcl of each minor node associated with a 212 * controller and its namespaces. 213 * 214 * In addition, there is one mutex n_mgmt_mutex which must be held whenever the 215 * driver state for any namespace is changed, especially across calls to 216 * nvme_init_ns(), nvme_attach_ns() and nvme_detach_ns(). Except when detaching 217 * nvme, it should also be held across calls that modify the blkdev handle of a 218 * namespace. Command and queue mutexes may be acquired and released while 219 * n_mgmt_mutex is held, n_minor_mutex should not. 220 * 221 * 222 * Quiesce / Fast Reboot: 223 * 224 * The driver currently does not support fast reboot. A quiesce(9E) entry point 225 * is still provided which is used to send a shutdown notification to the 226 * device. 227 * 228 * 229 * NVMe Hotplug: 230 * 231 * The driver supports hot removal. The driver uses the NDI event framework 232 * to register a callback, nvme_remove_callback, to clean up when a disk is 233 * removed. In particular, the driver will unqueue outstanding I/O commands and 234 * set n_dead on the softstate to true so that other operations, such as ioctls 235 * and command submissions, fail as well. 236 * 237 * While the callback registration relies on the NDI event framework, the 238 * removal event itself is kicked off in the PCIe hotplug framework, when the 239 * PCIe bridge driver ("pcieb") gets a hotplug interrupt indicating that a 240 * device was removed from the slot. 241 * 242 * The NVMe driver instance itself will remain until the final close of the 243 * device. 244 * 245 * 246 * DDI UFM Support 247 * 248 * The driver supports the DDI UFM framework for reporting information about 249 * the device's firmware image and slot configuration. This data can be 250 * queried by userland software via ioctls to the ufm driver. For more 251 * information, see ddi_ufm(9E). 
252 * 253 * 254 * Driver Configuration: 255 * 256 * The following driver properties can be changed to control some aspects of the 257 * driver's operation: 258 * - strict-version: can be set to 0 to allow devices conforming to newer 259 * major versions to be used 260 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor 261 * specific command status as a fatal error leading to device faulting 262 * - admin-queue-len: the maximum length of the admin queue (16-4096) 263 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536) 264 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536) 265 * - async-event-limit: the maximum number of asynchronous event requests to be 266 * posted by the driver 267 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write 268 * cache 269 * - min-phys-block-size: the minimum physical block size to report to blkdev, 270 * which is among other things the basis for ZFS vdev ashift 271 * - max-submission-queues: the maximum number of I/O submission queues. 272 * - max-completion-queues: the maximum number of I/O completion queues, 273 * can be less than max-submission-queues, in which case the completion 274 * queues are shared. 275 * 276 * In addition to the above properties, some device-specific tunables can be 277 * configured using the nvme-config-list global property. The value of this 278 * property is a list of triplets. The formal syntax is: 279 * 280 * nvme-config-list ::= <triplet> [, <triplet>]* ; 281 * <triplet> ::= "<model>" , "<rev-list>" , "<tuple-list>" 282 * <rev-list> ::= [ <fwrev> [, <fwrev>]*] 283 * <tuple-list> ::= <tunable> [, <tunable>]* 284 * <tunable> ::= <name> : <value> 285 * 286 * The <model> and <fwrev> are the strings in nvme_identify_ctrl_t`id_model and 287 * nvme_identify_ctrl_t`id_fwrev, respectively. The remainder of <tuple-list> 288 * contains one or more tunables to apply to all controllers that match the 289 * specified model number and optionally firmware revision. Each <tunable> is a 290 * <name> : <value> pair.
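 *
 * As an illustration only (the model string below is made up and matches no
 * particular device), a hypothetical nvme.conf entry following the grammar
 * above could look like:
 *
 *	nvme-config-list =
 *	    "EXAMPLE MODEL 1234", "", "min-phys-block-size:4096";
 *
 * with the empty <rev-list> intended to match any firmware revision, since the
 * revision match is optional per the grammar above.
 *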
Supported tunables are: 291 * 292 * - ignore-unknown-vendor-status: can be set to "on" to not handle any vendor 293 * specific command status as a fatal error leading to device faulting 294 * 295 * - min-phys-block-size: the minimum physical block size to report to blkdev, 296 * which is among other things the basis for ZFS vdev ashift 297 * 298 * - volatile-write-cache: can be set to "on" or "off" to enable or disable the 299 * volatile write cache, if present 300 * 301 * 302 * TODO: 303 * - figure out sane default for I/O queue depth reported to blkdev 304 * - FMA handling of media errors 305 * - support for devices supporting very large I/O requests using chained PRPs 306 * - support for configuring hardware parameters like interrupt coalescing 307 * - support for media formatting and hard partitioning into namespaces 308 * - support for big-endian systems 309 * - support for fast reboot 310 * - support for NVMe Subsystem Reset (1.1) 311 * - support for Scatter/Gather lists (1.1) 312 * - support for Reservations (1.1) 313 * - support for power management 314 */ 315 316 #include <sys/byteorder.h> 317 #ifdef _BIG_ENDIAN 318 #error nvme driver needs porting for big-endian platforms 319 #endif 320 321 #include <sys/modctl.h> 322 #include <sys/conf.h> 323 #include <sys/devops.h> 324 #include <sys/ddi.h> 325 #include <sys/ddi_ufm.h> 326 #include <sys/sunddi.h> 327 #include <sys/sunndi.h> 328 #include <sys/bitmap.h> 329 #include <sys/sysmacros.h> 330 #include <sys/param.h> 331 #include <sys/varargs.h> 332 #include <sys/cpuvar.h> 333 #include <sys/disp.h> 334 #include <sys/blkdev.h> 335 #include <sys/atomic.h> 336 #include <sys/archsystm.h> 337 #include <sys/sata/sata_hba.h> 338 #include <sys/stat.h> 339 #include <sys/policy.h> 340 #include <sys/list.h> 341 #include <sys/dkio.h> 342 343 #include <sys/nvme.h> 344 345 #ifdef __x86 346 #include <sys/x86_archext.h> 347 #endif 348 349 #include "nvme_reg.h" 350 #include "nvme_var.h" 351 352 /* 353 * Assertions to make sure that we've properly captured various aspects of the 354 * packed structures and haven't broken them during updates.
355 */ 356 CTASSERT(sizeof (nvme_identify_ctrl_t) == NVME_IDENTIFY_BUFSIZE); 357 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256); 358 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512); 359 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520); 360 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768); 361 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792); 362 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048); 363 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072); 364 365 CTASSERT(sizeof (nvme_identify_nsid_t) == NVME_IDENTIFY_BUFSIZE); 366 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32); 367 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92); 368 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104); 369 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128); 370 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384); 371 372 CTASSERT(sizeof (nvme_identify_nsid_list_t) == NVME_IDENTIFY_BUFSIZE); 373 CTASSERT(sizeof (nvme_identify_ctrl_list_t) == NVME_IDENTIFY_BUFSIZE); 374 375 CTASSERT(sizeof (nvme_identify_primary_caps_t) == NVME_IDENTIFY_BUFSIZE); 376 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32); 377 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64); 378 379 CTASSERT(sizeof (nvme_nschange_list_t) == 4096); 380 381 382 /* NVMe spec version supported */ 383 static const int nvme_version_major = 2; 384 385 /* tunable for admin command timeout in seconds, default is 1s */ 386 int nvme_admin_cmd_timeout = 1; 387 388 /* tunable for FORMAT NVM command timeout in seconds, default is 600s */ 389 int nvme_format_cmd_timeout = 600; 390 391 /* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */ 392 int nvme_commit_save_cmd_timeout = 15; 393 394 /* 395 * tunable for the size of arbitrary vendor specific admin commands, 396 * default is 16MiB. 397 */ 398 uint32_t nvme_vendor_specific_admin_cmd_size = 1 << 24; 399 400 /* 401 * tunable for the max timeout of arbitrary vendor specific admin commands, 402 * default is 60s.
403 */ 404 uint_t nvme_vendor_specific_admin_cmd_max_timeout = 60; 405 406 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t); 407 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t); 408 static int nvme_quiesce(dev_info_t *); 409 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *); 410 static int nvme_setup_interrupts(nvme_t *, int, int); 411 static void nvme_release_interrupts(nvme_t *); 412 static uint_t nvme_intr(caddr_t, caddr_t); 413 414 static void nvme_shutdown(nvme_t *, boolean_t); 415 static boolean_t nvme_reset(nvme_t *, boolean_t); 416 static int nvme_init(nvme_t *); 417 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int); 418 static void nvme_free_cmd(nvme_cmd_t *); 419 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t, 420 bd_xfer_t *); 421 static void nvme_admin_cmd(nvme_cmd_t *, int); 422 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *); 423 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *); 424 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *); 425 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int); 426 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *); 427 static void nvme_wait_cmd(nvme_cmd_t *, uint_t); 428 static void nvme_wakeup_cmd(void *); 429 static void nvme_async_event_task(void *); 430 431 static int nvme_check_unknown_cmd_status(nvme_cmd_t *); 432 static int nvme_check_vendor_cmd_status(nvme_cmd_t *); 433 static int nvme_check_integrity_cmd_status(nvme_cmd_t *); 434 static int nvme_check_specific_cmd_status(nvme_cmd_t *); 435 static int nvme_check_generic_cmd_status(nvme_cmd_t *); 436 static inline int nvme_check_cmd_status(nvme_cmd_t *); 437 438 static int nvme_abort_cmd(nvme_cmd_t *, uint_t); 439 static void nvme_async_event(nvme_t *); 440 static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t, 441 uint8_t, boolean_t, uint8_t); 442 static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t, 443 ...); 444 static int nvme_identify(nvme_t *, boolean_t, uint32_t, uint8_t, void **); 445 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t, 446 uint32_t *); 447 static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t *, 448 void **, size_t *); 449 static int nvme_write_cache_set(nvme_t *, boolean_t); 450 static int nvme_set_nqueues(nvme_t *); 451 452 static void nvme_free_dma(nvme_dma_t *); 453 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *, 454 nvme_dma_t **); 455 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t, 456 nvme_dma_t **); 457 static void nvme_free_qpair(nvme_qpair_t *); 458 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t); 459 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t); 460 461 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t); 462 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t); 463 static inline uint64_t nvme_get64(nvme_t *, uintptr_t); 464 static inline uint32_t nvme_get32(nvme_t *, uintptr_t); 465 466 static boolean_t nvme_check_regs_hdl(nvme_t *); 467 static boolean_t nvme_check_dma_hdl(nvme_dma_t *); 468 469 static int nvme_fill_prp(nvme_cmd_t *, ddi_dma_handle_t); 470 471 static void nvme_bd_xfer_done(void *); 472 static void nvme_bd_driveinfo(void *, bd_drive_t *); 473 static int nvme_bd_mediainfo(void *, bd_media_t *); 474 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t); 475 static int nvme_bd_read(void *, bd_xfer_t *); 476 
static int nvme_bd_write(void *, bd_xfer_t *); 477 static int nvme_bd_sync(void *, bd_xfer_t *); 478 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *); 479 static int nvme_bd_free_space(void *, bd_xfer_t *); 480 481 static int nvme_prp_dma_constructor(void *, void *, int); 482 static void nvme_prp_dma_destructor(void *, void *); 483 484 static void nvme_prepare_devid(nvme_t *, uint32_t); 485 486 /* DDI UFM callbacks */ 487 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t, 488 ddi_ufm_image_t *); 489 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t, 490 ddi_ufm_slot_t *); 491 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *); 492 493 static int nvme_open(dev_t *, int, int, cred_t *); 494 static int nvme_close(dev_t, int, int, cred_t *); 495 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *); 496 497 static int nvme_init_ns(nvme_t *, int); 498 static int nvme_attach_ns(nvme_t *, int); 499 static int nvme_detach_ns(nvme_t *, int); 500 501 #define NVME_NSID2NS(nvme, nsid) (&((nvme)->n_ns[(nsid) - 1])) 502 503 static ddi_ufm_ops_t nvme_ufm_ops = { 504 NULL, 505 nvme_ufm_fill_image, 506 nvme_ufm_fill_slot, 507 nvme_ufm_getcaps 508 }; 509 510 #define NVME_MINOR_INST_SHIFT 9 511 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid)) 512 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT) 513 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1)) 514 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2) 515 #define NVME_IS_VENDOR_SPECIFIC_CMD(x) (((x) >= 0xC0) && ((x) <= 0xFF)) 516 #define NVME_VENDOR_SPECIFIC_LOGPAGE_MIN 0xC0 517 #define NVME_VENDOR_SPECIFIC_LOGPAGE_MAX 0xFF 518 #define NVME_IS_VENDOR_SPECIFIC_LOGPAGE(x) \ 519 (((x) >= NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) && \ 520 ((x) <= NVME_VENDOR_SPECIFIC_LOGPAGE_MAX)) 521 522 /* 523 * NVMe versions 1.3 and later actually support log pages up to UINT32_MAX 524 * DWords in size. However, revision 1.3 also modified the layout of the Get Log 525 * Page command significantly relative to version 1.2, including changing 526 * reserved bits, adding new bitfields, and requiring the use of command DWord 527 * 11 to fully specify the size of the log page (the lower and upper 16 bits of 528 * the number of DWords in the page are split between DWord 10 and DWord 11, 529 * respectively). 530 * 531 * All of these impose significantly different layout requirements on the 532 * `nvme_getlogpage_t` type. This could be solved with two different types, or a 533 * complicated/nested union with the two versions as the overlying members. Both 534 * of these are reasonable, if a bit convoluted. However, there is no current 535 * need for such large pages, or a way to test them, as most log pages actually 536 * fit within the current size limit. So for simplicity, we retain the size cap 537 * from version 1.2. 538 * 539 * Note that the number of DWords is zero-based, so we add 1. It is subtracted 540 * to form a zero-based value in `nvme_get_logpage`. 541 */ 542 #define NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE \ 543 (((1 << 12) + 1) * sizeof (uint32_t)) 544 545 static void *nvme_state; 546 static kmem_cache_t *nvme_cmd_cache; 547 548 /* 549 * DMA attributes for queue DMA memory 550 * 551 * Queue DMA memory must be page aligned. The maximum length of a queue is 552 * 65536 entries, and an entry can be 64 bytes long.
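 *
 * As a worked example of the sizing above: 65536 entries * 64 bytes per entry
 * = 4 MiB, which is where the (UINT16_MAX + 1) * sizeof (nvme_sqe_t) terms in
 * dma_attr_count_max and dma_attr_maxxfer below come from.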
553 */ 554 static const ddi_dma_attr_t nvme_queue_dma_attr = { 555 .dma_attr_version = DMA_ATTR_V0, 556 .dma_attr_addr_lo = 0, 557 .dma_attr_addr_hi = 0xffffffffffffffffULL, 558 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1, 559 .dma_attr_align = 0x1000, 560 .dma_attr_burstsizes = 0x7ff, 561 .dma_attr_minxfer = 0x1000, 562 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t), 563 .dma_attr_seg = 0xffffffffffffffffULL, 564 .dma_attr_sgllen = 1, 565 .dma_attr_granular = 1, 566 .dma_attr_flags = 0, 567 }; 568 569 /* 570 * DMA attributes for transfers using Physical Region Page (PRP) entries 571 * 572 * A PRP entry describes one page of DMA memory using the page size specified 573 * in the controller configuration's memory page size register (CC.MPS). It uses 574 * a 64bit base address aligned to this page size. There is no limitation on 575 * chaining PRPs together for arbitrarily large DMA transfers. These DMA 576 * attributes will be copied into the nvme_t during nvme_attach() and the 577 * dma_attr_maxxfer will be updated. 578 */ 579 static const ddi_dma_attr_t nvme_prp_dma_attr = { 580 .dma_attr_version = DMA_ATTR_V0, 581 .dma_attr_addr_lo = 0, 582 .dma_attr_addr_hi = 0xffffffffffffffffULL, 583 .dma_attr_count_max = 0xfff, 584 .dma_attr_align = 0x1000, 585 .dma_attr_burstsizes = 0x7ff, 586 .dma_attr_minxfer = 0x1000, 587 .dma_attr_maxxfer = 0x1000, 588 .dma_attr_seg = 0xfff, 589 .dma_attr_sgllen = -1, 590 .dma_attr_granular = 1, 591 .dma_attr_flags = 0, 592 }; 593 594 /* 595 * DMA attributes for transfers using scatter/gather lists 596 * 597 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a 598 * 32bit length field. SGL Segment and SGL Last Segment entries require the 599 * length to be a multiple of 16 bytes. While the SGL DMA attributes are copied 600 * into the nvme_t, they are not currently used for any I/O. 
601 */ 602 static const ddi_dma_attr_t nvme_sgl_dma_attr = { 603 .dma_attr_version = DMA_ATTR_V0, 604 .dma_attr_addr_lo = 0, 605 .dma_attr_addr_hi = 0xffffffffffffffffULL, 606 .dma_attr_count_max = 0xffffffffUL, 607 .dma_attr_align = 1, 608 .dma_attr_burstsizes = 0x7ff, 609 .dma_attr_minxfer = 0x10, 610 .dma_attr_maxxfer = 0xfffffffffULL, 611 .dma_attr_seg = 0xffffffffffffffffULL, 612 .dma_attr_sgllen = -1, 613 .dma_attr_granular = 0x10, 614 .dma_attr_flags = 0 615 }; 616 617 static ddi_device_acc_attr_t nvme_reg_acc_attr = { 618 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 619 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 620 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 621 }; 622 623 static struct cb_ops nvme_cb_ops = { 624 .cb_open = nvme_open, 625 .cb_close = nvme_close, 626 .cb_strategy = nodev, 627 .cb_print = nodev, 628 .cb_dump = nodev, 629 .cb_read = nodev, 630 .cb_write = nodev, 631 .cb_ioctl = nvme_ioctl, 632 .cb_devmap = nodev, 633 .cb_mmap = nodev, 634 .cb_segmap = nodev, 635 .cb_chpoll = nochpoll, 636 .cb_prop_op = ddi_prop_op, 637 .cb_str = 0, 638 .cb_flag = D_NEW | D_MP, 639 .cb_rev = CB_REV, 640 .cb_aread = nodev, 641 .cb_awrite = nodev 642 }; 643 644 static struct dev_ops nvme_dev_ops = { 645 .devo_rev = DEVO_REV, 646 .devo_refcnt = 0, 647 .devo_getinfo = ddi_no_info, 648 .devo_identify = nulldev, 649 .devo_probe = nulldev, 650 .devo_attach = nvme_attach, 651 .devo_detach = nvme_detach, 652 .devo_reset = nodev, 653 .devo_cb_ops = &nvme_cb_ops, 654 .devo_bus_ops = NULL, 655 .devo_power = NULL, 656 .devo_quiesce = nvme_quiesce, 657 }; 658 659 static struct modldrv nvme_modldrv = { 660 .drv_modops = &mod_driverops, 661 .drv_linkinfo = "NVMe v1.1b", 662 .drv_dev_ops = &nvme_dev_ops 663 }; 664 665 static struct modlinkage nvme_modlinkage = { 666 .ml_rev = MODREV_1, 667 .ml_linkage = { &nvme_modldrv, NULL } 668 }; 669 670 static bd_ops_t nvme_bd_ops = { 671 .o_version = BD_OPS_CURRENT_VERSION, 672 .o_drive_info = nvme_bd_driveinfo, 673 .o_media_info = nvme_bd_mediainfo, 674 .o_devid_init = nvme_bd_devid, 675 .o_sync_cache = nvme_bd_sync, 676 .o_read = nvme_bd_read, 677 .o_write = nvme_bd_write, 678 .o_free_space = nvme_bd_free_space, 679 }; 680 681 /* 682 * This list will hold commands that have timed out and couldn't be aborted. 683 * As we don't know what the hardware may still do with the DMA memory we can't 684 * free them, so we'll keep them forever on this list where we can easily look 685 * at them with mdb. 
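 *
 * Purely as an illustration (not a required interface), such commands can be
 * inspected from a kernel mdb session with something like:
 *
 *	> nvme_lost_cmds::walk list | ::print nvme_cmd_t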
686 */ 687 static struct list nvme_lost_cmds; 688 static kmutex_t nvme_lc_mutex; 689 690 int 691 _init(void) 692 { 693 int error; 694 695 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1); 696 if (error != DDI_SUCCESS) 697 return (error); 698 699 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache", 700 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 701 702 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL); 703 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t), 704 offsetof(nvme_cmd_t, nc_list)); 705 706 bd_mod_init(&nvme_dev_ops); 707 708 error = mod_install(&nvme_modlinkage); 709 if (error != DDI_SUCCESS) { 710 ddi_soft_state_fini(&nvme_state); 711 mutex_destroy(&nvme_lc_mutex); 712 list_destroy(&nvme_lost_cmds); 713 bd_mod_fini(&nvme_dev_ops); 714 } 715 716 return (error); 717 } 718 719 int 720 _fini(void) 721 { 722 int error; 723 724 if (!list_is_empty(&nvme_lost_cmds)) 725 return (DDI_FAILURE); 726 727 error = mod_remove(&nvme_modlinkage); 728 if (error == DDI_SUCCESS) { 729 ddi_soft_state_fini(&nvme_state); 730 kmem_cache_destroy(nvme_cmd_cache); 731 mutex_destroy(&nvme_lc_mutex); 732 list_destroy(&nvme_lost_cmds); 733 bd_mod_fini(&nvme_dev_ops); 734 } 735 736 return (error); 737 } 738 739 int 740 _info(struct modinfo *modinfop) 741 { 742 return (mod_info(&nvme_modlinkage, modinfop)); 743 } 744 745 static inline void 746 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) 747 { 748 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 749 750 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 751 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); 752 } 753 754 static inline void 755 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) 756 { 757 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 758 759 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 760 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); 761 } 762 763 static inline uint64_t 764 nvme_get64(nvme_t *nvme, uintptr_t reg) 765 { 766 uint64_t val; 767 768 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 769 770 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 771 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); 772 773 return (val); 774 } 775 776 static inline uint32_t 777 nvme_get32(nvme_t *nvme, uintptr_t reg) 778 { 779 uint32_t val; 780 781 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 782 783 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 784 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); 785 786 return (val); 787 } 788 789 static boolean_t 790 nvme_check_regs_hdl(nvme_t *nvme) 791 { 792 ddi_fm_error_t error; 793 794 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); 795 796 if (error.fme_status != DDI_FM_OK) 797 return (B_TRUE); 798 799 return (B_FALSE); 800 } 801 802 static boolean_t 803 nvme_check_dma_hdl(nvme_dma_t *dma) 804 { 805 ddi_fm_error_t error; 806 807 if (dma == NULL) 808 return (B_FALSE); 809 810 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION); 811 812 if (error.fme_status != DDI_FM_OK) 813 return (B_TRUE); 814 815 return (B_FALSE); 816 } 817 818 static void 819 nvme_free_dma_common(nvme_dma_t *dma) 820 { 821 if (dma->nd_dmah != NULL) 822 (void) ddi_dma_unbind_handle(dma->nd_dmah); 823 if (dma->nd_acch != NULL) 824 ddi_dma_mem_free(&dma->nd_acch); 825 if (dma->nd_dmah != NULL) 826 ddi_dma_free_handle(&dma->nd_dmah); 827 } 828 829 static void 830 nvme_free_dma(nvme_dma_t *dma) 831 { 832 nvme_free_dma_common(dma); 833 kmem_free(dma, sizeof (*dma)); 834 } 835 836 /* ARGSUSED */ 837 static void 838 nvme_prp_dma_destructor(void *buf, void *private) 839 { 
840 nvme_dma_t *dma = (nvme_dma_t *)buf; 841 842 nvme_free_dma_common(dma); 843 } 844 845 static int 846 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, 847 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr) 848 { 849 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, 850 &dma->nd_dmah) != DDI_SUCCESS) { 851 /* 852 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and 853 * the only other possible error is DDI_DMA_BADATTR which 854 * indicates a driver bug which should cause a panic. 855 */ 856 dev_err(nvme->n_dip, CE_PANIC, 857 "!failed to get DMA handle, check DMA attributes"); 858 return (DDI_FAILURE); 859 } 860 861 /* 862 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified 863 * or the flags are conflicting, which isn't the case here. 864 */ 865 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, 866 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp, 867 &dma->nd_len, &dma->nd_acch); 868 869 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp, 870 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 871 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) { 872 dev_err(nvme->n_dip, CE_WARN, 873 "!failed to bind DMA memory"); 874 atomic_inc_32(&nvme->n_dma_bind_err); 875 nvme_free_dma_common(dma); 876 return (DDI_FAILURE); 877 } 878 879 return (DDI_SUCCESS); 880 } 881 882 static int 883 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, 884 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret) 885 { 886 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP); 887 888 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) != 889 DDI_SUCCESS) { 890 *ret = NULL; 891 kmem_free(dma, sizeof (nvme_dma_t)); 892 return (DDI_FAILURE); 893 } 894 895 bzero(dma->nd_memp, dma->nd_len); 896 897 *ret = dma; 898 return (DDI_SUCCESS); 899 } 900 901 /* ARGSUSED */ 902 static int 903 nvme_prp_dma_constructor(void *buf, void *private, int flags) 904 { 905 nvme_dma_t *dma = (nvme_dma_t *)buf; 906 nvme_t *nvme = (nvme_t *)private; 907 908 dma->nd_dmah = NULL; 909 dma->nd_acch = NULL; 910 911 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize, 912 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) { 913 return (-1); 914 } 915 916 ASSERT(dma->nd_ncookie == 1); 917 918 dma->nd_cached = B_TRUE; 919 920 return (0); 921 } 922 923 static int 924 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, 925 uint_t flags, nvme_dma_t **dma) 926 { 927 uint32_t len = nentry * qe_len; 928 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; 929 930 len = roundup(len, nvme->n_pagesize); 931 932 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) 933 != DDI_SUCCESS) { 934 dev_err(nvme->n_dip, CE_WARN, 935 "!failed to get DMA memory for queue"); 936 goto fail; 937 } 938 939 if ((*dma)->nd_ncookie != 1) { 940 dev_err(nvme->n_dip, CE_WARN, 941 "!got too many cookies for queue DMA"); 942 goto fail; 943 } 944 945 return (DDI_SUCCESS); 946 947 fail: 948 if (*dma) { 949 nvme_free_dma(*dma); 950 *dma = NULL; 951 } 952 953 return (DDI_FAILURE); 954 } 955 956 static void 957 nvme_free_cq(nvme_cq_t *cq) 958 { 959 mutex_destroy(&cq->ncq_mutex); 960 961 if (cq->ncq_cmd_taskq != NULL) 962 taskq_destroy(cq->ncq_cmd_taskq); 963 964 if (cq->ncq_dma != NULL) 965 nvme_free_dma(cq->ncq_dma); 966 967 kmem_free(cq, sizeof (*cq)); 968 } 969 970 static void 971 nvme_free_qpair(nvme_qpair_t *qp) 972 { 973 int i; 974 975 mutex_destroy(&qp->nq_mutex); 976 sema_destroy(&qp->nq_sema); 977 978 if (qp->nq_sqdma != NULL) 979 
nvme_free_dma(qp->nq_sqdma); 980 981 if (qp->nq_active_cmds > 0) 982 for (i = 0; i != qp->nq_nentry; i++) 983 if (qp->nq_cmd[i] != NULL) 984 nvme_free_cmd(qp->nq_cmd[i]); 985 986 if (qp->nq_cmd != NULL) 987 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry); 988 989 kmem_free(qp, sizeof (nvme_qpair_t)); 990 } 991 992 /* 993 * Destroy the pre-allocated cq array, but only free individual completion 994 * queues from the given starting index. 995 */ 996 static void 997 nvme_destroy_cq_array(nvme_t *nvme, uint_t start) 998 { 999 uint_t i; 1000 1001 for (i = start; i < nvme->n_cq_count; i++) 1002 if (nvme->n_cq[i] != NULL) 1003 nvme_free_cq(nvme->n_cq[i]); 1004 1005 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count); 1006 } 1007 1008 static int 1009 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx, 1010 uint_t nthr) 1011 { 1012 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP); 1013 char name[64]; /* large enough for the taskq name */ 1014 1015 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER, 1016 DDI_INTR_PRI(nvme->n_intr_pri)); 1017 1018 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 1019 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS) 1020 goto fail; 1021 1022 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp; 1023 cq->ncq_nentry = nentry; 1024 cq->ncq_id = idx; 1025 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx); 1026 1027 /* 1028 * Each completion queue has its own command taskq. 1029 */ 1030 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u", 1031 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx); 1032 1033 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX, 1034 TASKQ_PREPOPULATE); 1035 1036 if (cq->ncq_cmd_taskq == NULL) { 1037 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd " 1038 "taskq for cq %u", idx); 1039 goto fail; 1040 } 1041 1042 *cqp = cq; 1043 return (DDI_SUCCESS); 1044 1045 fail: 1046 nvme_free_cq(cq); 1047 *cqp = NULL; 1048 1049 return (DDI_FAILURE); 1050 } 1051 1052 /* 1053 * Create the n_cq array big enough to hold "ncq" completion queues. 1054 * If the array already exists it will be re-sized (but only larger). 1055 * The admin queue is included in this array, which boosts the 1056 * max number of entries to UINT16_MAX + 1. 
1057 */ 1058 static int 1059 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr) 1060 { 1061 nvme_cq_t **cq; 1062 uint_t i, cq_count; 1063 1064 ASSERT3U(ncq, >, nvme->n_cq_count); 1065 1066 cq = nvme->n_cq; 1067 cq_count = nvme->n_cq_count; 1068 1069 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP); 1070 nvme->n_cq_count = ncq; 1071 1072 for (i = 0; i < cq_count; i++) 1073 nvme->n_cq[i] = cq[i]; 1074 1075 for (; i < nvme->n_cq_count; i++) 1076 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) != 1077 DDI_SUCCESS) 1078 goto fail; 1079 1080 if (cq != NULL) 1081 kmem_free(cq, sizeof (*cq) * cq_count); 1082 1083 return (DDI_SUCCESS); 1084 1085 fail: 1086 nvme_destroy_cq_array(nvme, cq_count); 1087 /* 1088 * Restore the original array 1089 */ 1090 nvme->n_cq_count = cq_count; 1091 nvme->n_cq = cq; 1092 1093 return (DDI_FAILURE); 1094 } 1095 1096 static int 1097 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 1098 uint_t idx) 1099 { 1100 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 1101 uint_t cq_idx; 1102 1103 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 1104 DDI_INTR_PRI(nvme->n_intr_pri)); 1105 1106 /* 1107 * The NVMe spec defines that a full queue has one empty (unused) slot; 1108 * initialize the semaphore accordingly. 1109 */ 1110 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL); 1111 1112 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 1113 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 1114 goto fail; 1115 1116 /* 1117 * idx == 0 is adminq, those above 0 are shared io completion queues. 1118 */ 1119 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1); 1120 qp->nq_cq = nvme->n_cq[cq_idx]; 1121 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 1122 qp->nq_nentry = nentry; 1123 1124 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx); 1125 1126 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 1127 qp->nq_next_cmd = 0; 1128 1129 *nqp = qp; 1130 return (DDI_SUCCESS); 1131 1132 fail: 1133 nvme_free_qpair(qp); 1134 *nqp = NULL; 1135 1136 return (DDI_FAILURE); 1137 } 1138 1139 static nvme_cmd_t * 1140 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 1141 { 1142 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 1143 1144 if (cmd == NULL) 1145 return (cmd); 1146 1147 bzero(cmd, sizeof (nvme_cmd_t)); 1148 1149 cmd->nc_nvme = nvme; 1150 1151 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 1152 DDI_INTR_PRI(nvme->n_intr_pri)); 1153 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 1154 1155 return (cmd); 1156 } 1157 1158 static void 1159 nvme_free_cmd(nvme_cmd_t *cmd) 1160 { 1161 /* Don't free commands on the lost commands list. 
*/ 1162 if (list_link_active(&cmd->nc_list)) 1163 return; 1164 1165 if (cmd->nc_dma) { 1166 nvme_free_dma(cmd->nc_dma); 1167 cmd->nc_dma = NULL; 1168 } 1169 1170 if (cmd->nc_prp) { 1171 kmem_cache_free(cmd->nc_nvme->n_prp_cache, cmd->nc_prp); 1172 cmd->nc_prp = NULL; 1173 } 1174 1175 cv_destroy(&cmd->nc_cv); 1176 mutex_destroy(&cmd->nc_mutex); 1177 1178 kmem_cache_free(nvme_cmd_cache, cmd); 1179 } 1180 1181 static void 1182 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1183 { 1184 sema_p(&qp->nq_sema); 1185 nvme_submit_cmd_common(qp, cmd); 1186 } 1187 1188 static int 1189 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1190 { 1191 if (cmd->nc_nvme->n_dead) { 1192 return (EIO); 1193 } 1194 1195 if (sema_tryp(&qp->nq_sema) == 0) 1196 return (EAGAIN); 1197 1198 nvme_submit_cmd_common(qp, cmd); 1199 return (0); 1200 } 1201 1202 static void 1203 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd) 1204 { 1205 nvme_reg_sqtdbl_t tail = { 0 }; 1206 1207 mutex_enter(&qp->nq_mutex); 1208 cmd->nc_completed = B_FALSE; 1209 1210 /* 1211 * Now that we hold the queue pair lock, we must check whether or not 1212 * the controller has been listed as dead (e.g. was removed due to 1213 * hotplug). This is necessary as otherwise we could race with 1214 * nvme_remove_callback(). Because this has not been enqueued, we don't 1215 * call nvme_unqueue_cmd(), which is why we must manually decrement the 1216 * semaphore. 1217 */ 1218 if (cmd->nc_nvme->n_dead) { 1219 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, cmd->nc_callback, 1220 cmd, TQ_NOSLEEP, &cmd->nc_tqent); 1221 sema_v(&qp->nq_sema); 1222 mutex_exit(&qp->nq_mutex); 1223 return; 1224 } 1225 1226 /* 1227 * Try to insert the cmd into the active cmd array at the nq_next_cmd 1228 * slot. If the slot is already occupied advance to the next slot and 1229 * try again. This can happen for long running commands like async event 1230 * requests. 1231 */ 1232 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 1233 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1234 qp->nq_cmd[qp->nq_next_cmd] = cmd; 1235 1236 qp->nq_active_cmds++; 1237 1238 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 1239 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 1240 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 1241 sizeof (nvme_sqe_t) * qp->nq_sqtail, 1242 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 1243 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 1244 1245 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 1246 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 1247 1248 mutex_exit(&qp->nq_mutex); 1249 } 1250 1251 static nvme_cmd_t * 1252 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid) 1253 { 1254 nvme_cmd_t *cmd; 1255 1256 ASSERT(mutex_owned(&qp->nq_mutex)); 1257 ASSERT3S(cid, <, qp->nq_nentry); 1258 1259 cmd = qp->nq_cmd[cid]; 1260 qp->nq_cmd[cid] = NULL; 1261 ASSERT3U(qp->nq_active_cmds, >, 0); 1262 qp->nq_active_cmds--; 1263 sema_v(&qp->nq_sema); 1264 1265 ASSERT3P(cmd, !=, NULL); 1266 ASSERT3P(cmd->nc_nvme, ==, nvme); 1267 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid); 1268 1269 return (cmd); 1270 } 1271 1272 /* 1273 * Get the command tied to the next completed cqe and bump along completion 1274 * queue head counter. 1275 */ 1276 static nvme_cmd_t * 1277 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq) 1278 { 1279 nvme_qpair_t *qp; 1280 nvme_cqe_t *cqe; 1281 nvme_cmd_t *cmd; 1282 1283 ASSERT(mutex_owned(&cq->ncq_mutex)); 1284 1285 cqe = &cq->ncq_cq[cq->ncq_head]; 1286 1287 /* Check phase tag of CQE. 
Hardware inverts it for new entries. */ 1288 if (cqe->cqe_sf.sf_p == cq->ncq_phase) 1289 return (NULL); 1290 1291 qp = nvme->n_ioq[cqe->cqe_sqid]; 1292 1293 mutex_enter(&qp->nq_mutex); 1294 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid); 1295 mutex_exit(&qp->nq_mutex); 1296 1297 ASSERT(cmd->nc_sqid == cqe->cqe_sqid); 1298 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 1299 1300 qp->nq_sqhead = cqe->cqe_sqhd; 1301 1302 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry; 1303 1304 /* Toggle phase on wrap-around. */ 1305 if (cq->ncq_head == 0) 1306 cq->ncq_phase = cq->ncq_phase ? 0 : 1; 1307 1308 return (cmd); 1309 } 1310 1311 /* 1312 * Process all completed commands on the io completion queue. 1313 */ 1314 static uint_t 1315 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq) 1316 { 1317 nvme_reg_cqhdbl_t head = { 0 }; 1318 nvme_cmd_t *cmd; 1319 uint_t completed = 0; 1320 1321 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1322 DDI_SUCCESS) 1323 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1324 __func__); 1325 1326 mutex_enter(&cq->ncq_mutex); 1327 1328 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1329 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd, 1330 TQ_NOSLEEP, &cmd->nc_tqent); 1331 1332 completed++; 1333 } 1334 1335 if (completed > 0) { 1336 /* 1337 * Update the completion queue head doorbell. 1338 */ 1339 head.b.cqhdbl_cqh = cq->ncq_head; 1340 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1341 } 1342 1343 mutex_exit(&cq->ncq_mutex); 1344 1345 return (completed); 1346 } 1347 1348 static nvme_cmd_t * 1349 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 1350 { 1351 nvme_cq_t *cq = qp->nq_cq; 1352 nvme_reg_cqhdbl_t head = { 0 }; 1353 nvme_cmd_t *cmd; 1354 1355 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) != 1356 DDI_SUCCESS) 1357 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s", 1358 __func__); 1359 1360 mutex_enter(&cq->ncq_mutex); 1361 1362 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) { 1363 head.b.cqhdbl_cqh = cq->ncq_head; 1364 nvme_put32(nvme, cq->ncq_hdbl, head.r); 1365 } 1366 1367 mutex_exit(&cq->ncq_mutex); 1368 1369 return (cmd); 1370 } 1371 1372 static int 1373 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd) 1374 { 1375 nvme_cqe_t *cqe = &cmd->nc_cqe; 1376 1377 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1378 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1379 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1380 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1381 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1382 1383 if (cmd->nc_xfer != NULL) 1384 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1385 1386 if (cmd->nc_nvme->n_strict_version) { 1387 cmd->nc_nvme->n_dead = B_TRUE; 1388 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1389 } 1390 1391 return (EIO); 1392 } 1393 1394 static int 1395 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd) 1396 { 1397 nvme_cqe_t *cqe = &cmd->nc_cqe; 1398 1399 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1400 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 1401 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 1402 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 1403 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 1404 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) { 1405 cmd->nc_nvme->n_dead = B_TRUE; 1406 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1407 } 1408 1409 return (EIO); 1410 } 1411 1412 static int 1413 nvme_check_integrity_cmd_status(nvme_cmd_t 
*cmd) 1414 { 1415 nvme_cqe_t *cqe = &cmd->nc_cqe; 1416 1417 switch (cqe->cqe_sf.sf_sc) { 1418 case NVME_CQE_SC_INT_NVM_WRITE: 1419 /* write fail */ 1420 /* TODO: post ereport */ 1421 if (cmd->nc_xfer != NULL) 1422 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1423 return (EIO); 1424 1425 case NVME_CQE_SC_INT_NVM_READ: 1426 /* read fail */ 1427 /* TODO: post ereport */ 1428 if (cmd->nc_xfer != NULL) 1429 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1430 return (EIO); 1431 1432 default: 1433 return (nvme_check_unknown_cmd_status(cmd)); 1434 } 1435 } 1436 1437 static int 1438 nvme_check_generic_cmd_status(nvme_cmd_t *cmd) 1439 { 1440 nvme_cqe_t *cqe = &cmd->nc_cqe; 1441 1442 switch (cqe->cqe_sf.sf_sc) { 1443 case NVME_CQE_SC_GEN_SUCCESS: 1444 return (0); 1445 1446 /* 1447 * Errors indicating a bug in the driver should cause a panic. 1448 */ 1449 case NVME_CQE_SC_GEN_INV_OPC: 1450 /* Invalid Command Opcode */ 1451 if (!cmd->nc_dontpanic) 1452 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1453 "programming error: invalid opcode in cmd %p", 1454 (void *)cmd); 1455 return (EINVAL); 1456 1457 case NVME_CQE_SC_GEN_INV_FLD: 1458 /* Invalid Field in Command */ 1459 if (!cmd->nc_dontpanic) 1460 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1461 "programming error: invalid field in cmd %p", 1462 (void *)cmd); 1463 return (EIO); 1464 1465 case NVME_CQE_SC_GEN_ID_CNFL: 1466 /* Command ID Conflict */ 1467 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1468 "cmd ID conflict in cmd %p", (void *)cmd); 1469 return (0); 1470 1471 case NVME_CQE_SC_GEN_INV_NS: 1472 /* Invalid Namespace or Format */ 1473 if (!cmd->nc_dontpanic) 1474 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, 1475 "programming error: invalid NS/format in cmd %p", 1476 (void *)cmd); 1477 return (EINVAL); 1478 1479 case NVME_CQE_SC_GEN_NVM_LBA_RANGE: 1480 /* LBA Out Of Range */ 1481 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1482 "LBA out of range in cmd %p", (void *)cmd); 1483 return (0); 1484 1485 /* 1486 * Non-fatal errors, handle gracefully. 1487 */ 1488 case NVME_CQE_SC_GEN_DATA_XFR_ERR: 1489 /* Data Transfer Error (DMA) */ 1490 /* TODO: post ereport */ 1491 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err); 1492 if (cmd->nc_xfer != NULL) 1493 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1494 return (EIO); 1495 1496 case NVME_CQE_SC_GEN_INTERNAL_ERR: 1497 /* 1498 * Internal Error. The spec (v1.0, section 4.5.1.2) says 1499 * detailed error information is returned as async event, 1500 * so we pretty much ignore the error here and handle it 1501 * in the async event handler. 1502 */ 1503 atomic_inc_32(&cmd->nc_nvme->n_internal_err); 1504 if (cmd->nc_xfer != NULL) 1505 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1506 return (EIO); 1507 1508 case NVME_CQE_SC_GEN_ABORT_REQUEST: 1509 /* 1510 * Command Abort Requested. This normally happens only when a 1511 * command times out. 1512 */ 1513 /* TODO: post ereport or change blkdev to handle this? 
*/ 1514 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err); 1515 return (ECANCELED); 1516 1517 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 1518 /* Command Aborted due to Power Loss Notification */ 1519 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 1520 cmd->nc_nvme->n_dead = B_TRUE; 1521 return (EIO); 1522 1523 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 1524 /* Command Aborted due to SQ Deletion */ 1525 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del); 1526 return (EIO); 1527 1528 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 1529 /* Capacity Exceeded */ 1530 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc); 1531 if (cmd->nc_xfer != NULL) 1532 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 1533 return (EIO); 1534 1535 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 1536 /* Namespace Not Ready */ 1537 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy); 1538 if (cmd->nc_xfer != NULL) 1539 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1540 return (EIO); 1541 1542 case NVME_CQE_SC_GEN_NVM_FORMATTING: 1543 /* Format in progress (1.2) */ 1544 if (!NVME_VERSION_ATLEAST(&cmd->nc_nvme->n_version, 1, 2)) 1545 return (nvme_check_unknown_cmd_status(cmd)); 1546 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_formatting); 1547 if (cmd->nc_xfer != NULL) 1548 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 1549 return (EIO); 1550 1551 default: 1552 return (nvme_check_unknown_cmd_status(cmd)); 1553 } 1554 } 1555 1556 static int 1557 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 1558 { 1559 nvme_cqe_t *cqe = &cmd->nc_cqe; 1560 1561 switch (cqe->cqe_sf.sf_sc) { 1562 case NVME_CQE_SC_SPC_INV_CQ: 1563 /* Completion Queue Invalid */ 1564 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 1565 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err); 1566 return (EINVAL); 1567 1568 case NVME_CQE_SC_SPC_INV_QID: 1569 /* Invalid Queue Identifier */ 1570 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 1571 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 1572 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 1573 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 1574 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err); 1575 return (EINVAL); 1576 1577 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 1578 /* Max Queue Size Exceeded */ 1579 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 1580 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1581 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc); 1582 return (EINVAL); 1583 1584 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 1585 /* Abort Command Limit Exceeded */ 1586 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 1587 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1588 "abort command limit exceeded in cmd %p", (void *)cmd); 1589 return (0); 1590 1591 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 1592 /* Async Event Request Limit Exceeded */ 1593 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 1594 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1595 "async event request limit exceeded in cmd %p", 1596 (void *)cmd); 1597 return (0); 1598 1599 case NVME_CQE_SC_SPC_INV_INT_VECT: 1600 /* Invalid Interrupt Vector */ 1601 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1602 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect); 1603 return (EINVAL); 1604 1605 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 1606 /* Invalid Log Page */ 1607 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 1608 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page); 1609 return (EINVAL); 1610 1611 case NVME_CQE_SC_SPC_INV_FORMAT: 1612 /* Invalid Format */ 1613 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT); 1614 atomic_inc_32(&cmd->nc_nvme->n_inv_format); 1615 if (cmd->nc_xfer != NULL) 1616 
bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1617 return (EINVAL); 1618 1619 case NVME_CQE_SC_SPC_INV_Q_DEL: 1620 /* Invalid Queue Deletion */ 1621 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 1622 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del); 1623 return (EINVAL); 1624 1625 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 1626 /* Conflicting Attributes */ 1627 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 1628 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1629 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1630 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr); 1631 if (cmd->nc_xfer != NULL) 1632 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1633 return (EINVAL); 1634 1635 case NVME_CQE_SC_SPC_NVM_INV_PROT: 1636 /* Invalid Protection Information */ 1637 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 1638 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1639 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1640 atomic_inc_32(&cmd->nc_nvme->n_inv_prot); 1641 if (cmd->nc_xfer != NULL) 1642 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1643 return (EINVAL); 1644 1645 case NVME_CQE_SC_SPC_NVM_READONLY: 1646 /* Write to Read Only Range */ 1647 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1648 atomic_inc_32(&cmd->nc_nvme->n_readonly); 1649 if (cmd->nc_xfer != NULL) 1650 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1651 return (EROFS); 1652 1653 case NVME_CQE_SC_SPC_INV_FW_SLOT: 1654 /* Invalid Firmware Slot */ 1655 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1656 return (EINVAL); 1657 1658 case NVME_CQE_SC_SPC_INV_FW_IMG: 1659 /* Invalid Firmware Image */ 1660 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1661 return (EINVAL); 1662 1663 case NVME_CQE_SC_SPC_FW_RESET: 1664 /* Conventional Reset Required */ 1665 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1666 return (0); 1667 1668 case NVME_CQE_SC_SPC_FW_NSSR: 1669 /* NVMe Subsystem Reset Required */ 1670 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1671 return (0); 1672 1673 case NVME_CQE_SC_SPC_FW_NEXT_RESET: 1674 /* Activation Requires Reset */ 1675 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1676 return (0); 1677 1678 case NVME_CQE_SC_SPC_FW_MTFA: 1679 /* Activation Requires Maximum Time Violation */ 1680 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1681 return (EAGAIN); 1682 1683 case NVME_CQE_SC_SPC_FW_PROHIBITED: 1684 /* Activation Prohibited */ 1685 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE); 1686 return (EINVAL); 1687 1688 case NVME_CQE_SC_SPC_FW_OVERLAP: 1689 /* Overlapping Firmware Ranges */ 1690 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD); 1691 return (EINVAL); 1692 1693 default: 1694 return (nvme_check_unknown_cmd_status(cmd)); 1695 } 1696 } 1697 1698 static inline int 1699 nvme_check_cmd_status(nvme_cmd_t *cmd) 1700 { 1701 nvme_cqe_t *cqe = &cmd->nc_cqe; 1702 1703 /* 1704 * Take a shortcut if the controller is dead, or if 1705 * command status indicates no error. 
1706 */ 1707 if (cmd->nc_nvme->n_dead) 1708 return (EIO); 1709 1710 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1711 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 1712 return (0); 1713 1714 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 1715 return (nvme_check_generic_cmd_status(cmd)); 1716 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 1717 return (nvme_check_specific_cmd_status(cmd)); 1718 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 1719 return (nvme_check_integrity_cmd_status(cmd)); 1720 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 1721 return (nvme_check_vendor_cmd_status(cmd)); 1722 1723 return (nvme_check_unknown_cmd_status(cmd)); 1724 } 1725 1726 static int 1727 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec) 1728 { 1729 nvme_t *nvme = abort_cmd->nc_nvme; 1730 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1731 nvme_abort_cmd_t ac = { 0 }; 1732 int ret = 0; 1733 1734 sema_p(&nvme->n_abort_sema); 1735 1736 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 1737 ac.b.ac_sqid = abort_cmd->nc_sqid; 1738 1739 cmd->nc_sqid = 0; 1740 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 1741 cmd->nc_callback = nvme_wakeup_cmd; 1742 cmd->nc_sqe.sqe_cdw10 = ac.r; 1743 1744 /* 1745 * Send the ABORT to the hardware. The ABORT command will return _after_ 1746 * the aborted command has completed (aborted or otherwise), but since 1747 * we still hold the aborted command's mutex its callback hasn't been 1748 * processed yet. 1749 */ 1750 nvme_admin_cmd(cmd, sec); 1751 sema_v(&nvme->n_abort_sema); 1752 1753 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1754 dev_err(nvme->n_dip, CE_WARN, 1755 "!ABORT failed with sct = %x, sc = %x", 1756 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1757 atomic_inc_32(&nvme->n_abort_failed); 1758 } else { 1759 dev_err(nvme->n_dip, CE_WARN, 1760 "!ABORT of command %d/%d %ssuccessful", 1761 abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid, 1762 cmd->nc_cqe.cqe_dw0 & 1 ? "un" : ""); 1763 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0) 1764 atomic_inc_32(&nvme->n_cmd_aborted); 1765 } 1766 1767 nvme_free_cmd(cmd); 1768 return (ret); 1769 } 1770 1771 /* 1772 * nvme_wait_cmd -- wait for command completion or timeout 1773 * 1774 * In case of a serious error or a timeout of the abort command the hardware 1775 * will be declared dead and FMA will be notified. 1776 */ 1777 static void 1778 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec) 1779 { 1780 clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC); 1781 nvme_t *nvme = cmd->nc_nvme; 1782 nvme_reg_csts_t csts; 1783 nvme_qpair_t *qp; 1784 1785 ASSERT(mutex_owned(&cmd->nc_mutex)); 1786 1787 while (!cmd->nc_completed) { 1788 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 1789 break; 1790 } 1791 1792 if (cmd->nc_completed) 1793 return; 1794 1795 /* 1796 * The command timed out. 1797 * 1798 * Check controller for fatal status, any errors associated with the 1799 * register or DMA handle, or for a double timeout (abort command timed 1800 * out). If necessary log a warning and call FMA. 
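 *
 * Unless the controller is already failed (or the timed out command
 * was itself an ABORT), an abort is attempted first; if it succeeds
 * the command will complete shortly. Otherwise the command is removed
 * from its queue and, if it has DMA memory attached, kept on the lost
 * commands list (see below).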
1801 */ 1802 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1803 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, " 1804 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid, 1805 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 1806 atomic_inc_32(&nvme->n_cmd_timeout); 1807 1808 if (csts.b.csts_cfs || 1809 nvme_check_regs_hdl(nvme) || 1810 nvme_check_dma_hdl(cmd->nc_dma) || 1811 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 1812 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1813 nvme->n_dead = B_TRUE; 1814 } else if (nvme_abort_cmd(cmd, sec) == 0) { 1815 /* 1816 * If the abort succeeded the command should complete 1817 * immediately with an appropriate status. 1818 */ 1819 while (!cmd->nc_completed) 1820 cv_wait(&cmd->nc_cv, &cmd->nc_mutex); 1821 1822 return; 1823 } 1824 1825 qp = nvme->n_ioq[cmd->nc_sqid]; 1826 1827 mutex_enter(&qp->nq_mutex); 1828 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 1829 mutex_exit(&qp->nq_mutex); 1830 1831 /* 1832 * As we don't know what the presumed dead hardware might still do with 1833 * the DMA memory, we'll put the command on the lost commands list if it 1834 * has any DMA memory. 1835 */ 1836 if (cmd->nc_dma != NULL) { 1837 mutex_enter(&nvme_lc_mutex); 1838 list_insert_head(&nvme_lost_cmds, cmd); 1839 mutex_exit(&nvme_lc_mutex); 1840 } 1841 } 1842 1843 static void 1844 nvme_wakeup_cmd(void *arg) 1845 { 1846 nvme_cmd_t *cmd = arg; 1847 1848 mutex_enter(&cmd->nc_mutex); 1849 cmd->nc_completed = B_TRUE; 1850 cv_signal(&cmd->nc_cv); 1851 mutex_exit(&cmd->nc_mutex); 1852 } 1853 1854 static void 1855 nvme_async_event_task(void *arg) 1856 { 1857 nvme_cmd_t *cmd = arg; 1858 nvme_t *nvme = cmd->nc_nvme; 1859 nvme_error_log_entry_t *error_log = NULL; 1860 nvme_health_log_t *health_log = NULL; 1861 nvme_nschange_list_t *nslist = NULL; 1862 size_t logsize = 0; 1863 nvme_async_event_t event; 1864 1865 /* 1866 * Check for errors associated with the async request itself. The only 1867 * command-specific error is "async event limit exceeded", which 1868 * indicates a programming error in the driver and causes a panic in 1869 * nvme_check_cmd_status(). 1870 * 1871 * Other possible errors are various scenarios where the async request 1872 * was aborted, or internal errors in the device. Internal errors are 1873 * reported to FMA, the command aborts need no special handling here. 1874 * 1875 * And finally, at least qemu nvme does not support async events, 1876 * and will return NVME_CQE_SC_GEN_INV_OPC | DNR. If so, we 1877 * will avoid posting async events. 1878 */ 1879 1880 if (nvme_check_cmd_status(cmd) != 0) { 1881 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1882 "!async event request returned failure, sct = %x, " 1883 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 1884 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 1885 cmd->nc_cqe.cqe_sf.sf_m); 1886 1887 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1888 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 1889 cmd->nc_nvme->n_dead = B_TRUE; 1890 ddi_fm_service_impact(cmd->nc_nvme->n_dip, 1891 DDI_SERVICE_LOST); 1892 } 1893 1894 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1895 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC && 1896 cmd->nc_cqe.cqe_sf.sf_dnr == 1) { 1897 nvme->n_async_event_supported = B_FALSE; 1898 } 1899 1900 nvme_free_cmd(cmd); 1901 return; 1902 } 1903 1904 event.r = cmd->nc_cqe.cqe_dw0; 1905 1906 /* Clear CQE and re-submit the async request. 
*/ 1907 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1908 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1909 1910 switch (event.b.ae_type) { 1911 case NVME_ASYNC_TYPE_ERROR: 1912 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1913 (void) nvme_get_logpage(nvme, B_FALSE, 1914 (void **)&error_log, &logsize, event.b.ae_logpage); 1915 } else { 1916 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1917 "async event reply: %d", event.b.ae_logpage); 1918 atomic_inc_32(&nvme->n_wrong_logpage); 1919 } 1920 1921 switch (event.b.ae_info) { 1922 case NVME_ASYNC_ERROR_INV_SQ: 1923 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1924 "invalid submission queue"); 1925 return; 1926 1927 case NVME_ASYNC_ERROR_INV_DBL: 1928 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1929 "invalid doorbell write value"); 1930 return; 1931 1932 case NVME_ASYNC_ERROR_DIAGFAIL: 1933 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1934 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1935 nvme->n_dead = B_TRUE; 1936 atomic_inc_32(&nvme->n_diagfail_event); 1937 break; 1938 1939 case NVME_ASYNC_ERROR_PERSISTENT: 1940 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1941 "device error"); 1942 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1943 nvme->n_dead = B_TRUE; 1944 atomic_inc_32(&nvme->n_persistent_event); 1945 break; 1946 1947 case NVME_ASYNC_ERROR_TRANSIENT: 1948 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1949 "device error"); 1950 /* TODO: send ereport */ 1951 atomic_inc_32(&nvme->n_transient_event); 1952 break; 1953 1954 case NVME_ASYNC_ERROR_FW_LOAD: 1955 dev_err(nvme->n_dip, CE_WARN, 1956 "!firmware image load error"); 1957 atomic_inc_32(&nvme->n_fw_load_event); 1958 break; 1959 } 1960 break; 1961 1962 case NVME_ASYNC_TYPE_HEALTH: 1963 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1964 (void) nvme_get_logpage(nvme, B_FALSE, 1965 (void **)&health_log, &logsize, event.b.ae_logpage, 1966 -1); 1967 } else { 1968 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1969 "async event reply: %d", event.b.ae_logpage); 1970 atomic_inc_32(&nvme->n_wrong_logpage); 1971 } 1972 1973 switch (event.b.ae_info) { 1974 case NVME_ASYNC_HEALTH_RELIABILITY: 1975 dev_err(nvme->n_dip, CE_WARN, 1976 "!device reliability compromised"); 1977 /* TODO: send ereport */ 1978 atomic_inc_32(&nvme->n_reliability_event); 1979 break; 1980 1981 case NVME_ASYNC_HEALTH_TEMPERATURE: 1982 dev_err(nvme->n_dip, CE_WARN, 1983 "!temperature above threshold"); 1984 /* TODO: send ereport */ 1985 atomic_inc_32(&nvme->n_temperature_event); 1986 break; 1987 1988 case NVME_ASYNC_HEALTH_SPARE: 1989 dev_err(nvme->n_dip, CE_WARN, 1990 "!spare space below threshold"); 1991 /* TODO: send ereport */ 1992 atomic_inc_32(&nvme->n_spare_event); 1993 break; 1994 } 1995 break; 1996 1997 case NVME_ASYNC_TYPE_NOTICE: 1998 switch (event.b.ae_info) { 1999 case NVME_ASYNC_NOTICE_NS_CHANGE: 2000 dev_err(nvme->n_dip, CE_NOTE, 2001 "namespace attribute change event, " 2002 "logpage = %x", event.b.ae_logpage); 2003 atomic_inc_32(&nvme->n_notice_event); 2004 2005 if (event.b.ae_logpage != NVME_LOGPAGE_NSCHANGE) 2006 break; 2007 2008 if (nvme_get_logpage(nvme, B_FALSE, (void **)&nslist, 2009 &logsize, event.b.ae_logpage, -1) != 0) { 2010 break; 2011 } 2012 2013 if (nslist->nscl_ns[0] == UINT32_MAX) { 2014 dev_err(nvme->n_dip, CE_CONT, 2015 "more than %u namespaces have changed.\n", 2016 NVME_NSCHANGE_LIST_SIZE); 2017 break; 2018 } 2019 2020 mutex_enter(&nvme->n_mgmt_mutex); 2021 for (uint_t i = 0; i < NVME_NSCHANGE_LIST_SIZE; i++) { 2022 uint32_t 
nsid = nslist->nscl_ns[i]; 2023 2024 if (nsid == 0) /* end of list */ 2025 break; 2026 2027 dev_err(nvme->n_dip, CE_NOTE, 2028 "!namespace nvme%d/%u has changed.", 2029 ddi_get_instance(nvme->n_dip), nsid); 2030 2031 2032 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 2033 continue; 2034 2035 bd_state_change( 2036 NVME_NSID2NS(nvme, nsid)->ns_bd_hdl); 2037 } 2038 mutex_exit(&nvme->n_mgmt_mutex); 2039 2040 break; 2041 2042 case NVME_ASYNC_NOTICE_FW_ACTIVATE: 2043 dev_err(nvme->n_dip, CE_NOTE, 2044 "firmware activation starting, " 2045 "logpage = %x", event.b.ae_logpage); 2046 atomic_inc_32(&nvme->n_notice_event); 2047 break; 2048 2049 case NVME_ASYNC_NOTICE_TELEMETRY: 2050 dev_err(nvme->n_dip, CE_NOTE, 2051 "telemetry log changed, " 2052 "logpage = %x", event.b.ae_logpage); 2053 atomic_inc_32(&nvme->n_notice_event); 2054 break; 2055 2056 case NVME_ASYNC_NOTICE_NS_ASYMM: 2057 dev_err(nvme->n_dip, CE_NOTE, 2058 "asymmetric namespace access change, " 2059 "logpage = %x", event.b.ae_logpage); 2060 atomic_inc_32(&nvme->n_notice_event); 2061 break; 2062 2063 case NVME_ASYNC_NOTICE_LATENCYLOG: 2064 dev_err(nvme->n_dip, CE_NOTE, 2065 "predictable latency event aggregate log change, " 2066 "logpage = %x", event.b.ae_logpage); 2067 atomic_inc_32(&nvme->n_notice_event); 2068 break; 2069 2070 case NVME_ASYNC_NOTICE_LBASTATUS: 2071 dev_err(nvme->n_dip, CE_NOTE, 2072 "LBA status information alert, " 2073 "logpage = %x", event.b.ae_logpage); 2074 atomic_inc_32(&nvme->n_notice_event); 2075 break; 2076 2077 case NVME_ASYNC_NOTICE_ENDURANCELOG: 2078 dev_err(nvme->n_dip, CE_NOTE, 2079 "endurance group event aggregate log page change, " 2080 "logpage = %x", event.b.ae_logpage); 2081 atomic_inc_32(&nvme->n_notice_event); 2082 break; 2083 2084 default: 2085 dev_err(nvme->n_dip, CE_WARN, 2086 "!unknown notice async event received, " 2087 "info = %x, logpage = %x", event.b.ae_info, 2088 event.b.ae_logpage); 2089 atomic_inc_32(&nvme->n_unknown_event); 2090 break; 2091 } 2092 break; 2093 2094 case NVME_ASYNC_TYPE_VENDOR: 2095 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 2096 "received, info = %x, logpage = %x", event.b.ae_info, 2097 event.b.ae_logpage); 2098 atomic_inc_32(&nvme->n_vendor_event); 2099 break; 2100 2101 default: 2102 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 2103 "type = %x, info = %x, logpage = %x", event.b.ae_type, 2104 event.b.ae_info, event.b.ae_logpage); 2105 atomic_inc_32(&nvme->n_unknown_event); 2106 break; 2107 } 2108 2109 if (error_log != NULL) 2110 kmem_free(error_log, logsize); 2111 2112 if (health_log != NULL) 2113 kmem_free(health_log, logsize); 2114 2115 if (nslist != NULL) 2116 kmem_free(nslist, logsize); 2117 } 2118 2119 static void 2120 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 2121 { 2122 mutex_enter(&cmd->nc_mutex); 2123 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 2124 nvme_wait_cmd(cmd, sec); 2125 mutex_exit(&cmd->nc_mutex); 2126 } 2127 2128 static void 2129 nvme_async_event(nvme_t *nvme) 2130 { 2131 nvme_cmd_t *cmd; 2132 2133 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2134 cmd->nc_sqid = 0; 2135 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 2136 cmd->nc_callback = nvme_async_event_task; 2137 cmd->nc_dontpanic = B_TRUE; 2138 2139 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 2140 } 2141 2142 static int 2143 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf, 2144 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses) 2145 { 2146 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2147 nvme_format_nvm_t format_nvm = { 0 }; 2148 
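	/*
	 * Format NVM carries all of its arguments in CDW10: the LBA format
	 * index, the metadata setting, the protection information type and
	 * location, and the secure erase setting; the masks below match the
	 * widths of those fields.
	 */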
int ret; 2149 2150 format_nvm.b.fm_lbaf = lbaf & 0xf; 2151 format_nvm.b.fm_ms = ms ? 1 : 0; 2152 format_nvm.b.fm_pi = pi & 0x7; 2153 format_nvm.b.fm_pil = pil ? 1 : 0; 2154 format_nvm.b.fm_ses = ses & 0x7; 2155 2156 cmd->nc_sqid = 0; 2157 cmd->nc_callback = nvme_wakeup_cmd; 2158 cmd->nc_sqe.sqe_nsid = nsid; 2159 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 2160 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 2161 2162 /* 2163 * Some devices like Samsung SM951 don't allow formatting of all 2164 * namespaces in one command. Handle that gracefully. 2165 */ 2166 if (nsid == (uint32_t)-1) 2167 cmd->nc_dontpanic = B_TRUE; 2168 /* 2169 * If this format request was initiated by the user, then don't allow a 2170 * programmer error to panic the system. 2171 */ 2172 if (user) 2173 cmd->nc_dontpanic = B_TRUE; 2174 2175 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 2176 2177 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2178 dev_err(nvme->n_dip, CE_WARN, 2179 "!FORMAT failed with sct = %x, sc = %x", 2180 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2181 } 2182 2183 nvme_free_cmd(cmd); 2184 return (ret); 2185 } 2186 2187 /* 2188 * The `bufsize` parameter is usually an output parameter, set by this routine 2189 * when filling in the supported types of logpages from the device. However, for 2190 * vendor-specific pages, it is an input parameter, and must be set 2191 * appropriately by callers. 2192 */ 2193 static int 2194 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 2195 uint8_t logpage, ...) 2196 { 2197 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2198 nvme_getlogpage_t getlogpage = { 0 }; 2199 va_list ap; 2200 int ret; 2201 2202 va_start(ap, logpage); 2203 2204 cmd->nc_sqid = 0; 2205 cmd->nc_callback = nvme_wakeup_cmd; 2206 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 2207 2208 if (user) 2209 cmd->nc_dontpanic = B_TRUE; 2210 2211 getlogpage.b.lp_lid = logpage; 2212 2213 switch (logpage) { 2214 case NVME_LOGPAGE_ERROR: 2215 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2216 *bufsize = MIN(NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE, 2217 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); 2218 break; 2219 2220 case NVME_LOGPAGE_HEALTH: 2221 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2222 *bufsize = sizeof (nvme_health_log_t); 2223 break; 2224 2225 case NVME_LOGPAGE_FWSLOT: 2226 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2227 *bufsize = sizeof (nvme_fwslot_log_t); 2228 break; 2229 2230 case NVME_LOGPAGE_NSCHANGE: 2231 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 2232 *bufsize = sizeof (nvme_nschange_list_t); 2233 break; 2234 2235 default: 2236 /* 2237 * This intentionally only checks against the minimum valid 2238 * log page ID. `logpage` is a uint8_t, and `0xFF` is a valid 2239 * page ID, so this one-sided check avoids a compiler error 2240 * about a check that's always true. 
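 * Log page IDs at or above that minimum are vendor specific; for those
 * the caller must supply the buffer size (see the comment above this
 * function) and the namespace ID is taken from the variable argument
 * list.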
2241 */ 2242 if (logpage < NVME_VENDOR_SPECIFIC_LOGPAGE_MIN) { 2243 dev_err(nvme->n_dip, CE_WARN, 2244 "!unknown log page requested: %d", logpage); 2245 atomic_inc_32(&nvme->n_unknown_logpage); 2246 ret = EINVAL; 2247 goto fail; 2248 } 2249 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 2250 } 2251 2252 va_end(ap); 2253 2254 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1; 2255 2256 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 2257 2258 if (nvme_zalloc_dma(nvme, *bufsize, 2259 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2260 dev_err(nvme->n_dip, CE_WARN, 2261 "!nvme_zalloc_dma failed for GET LOG PAGE"); 2262 ret = ENOMEM; 2263 goto fail; 2264 } 2265 2266 if ((ret = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 2267 goto fail; 2268 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2269 2270 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2271 dev_err(nvme->n_dip, CE_WARN, 2272 "!GET LOG PAGE failed with sct = %x, sc = %x", 2273 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2274 goto fail; 2275 } 2276 2277 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2278 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2279 2280 fail: 2281 nvme_free_cmd(cmd); 2282 2283 return (ret); 2284 } 2285 2286 static int 2287 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t cns, 2288 void **buf) 2289 { 2290 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2291 int ret; 2292 2293 if (buf == NULL) 2294 return (EINVAL); 2295 2296 cmd->nc_sqid = 0; 2297 cmd->nc_callback = nvme_wakeup_cmd; 2298 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2299 cmd->nc_sqe.sqe_nsid = nsid; 2300 cmd->nc_sqe.sqe_cdw10 = cns; 2301 2302 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 2303 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2304 dev_err(nvme->n_dip, CE_WARN, 2305 "!nvme_zalloc_dma failed for IDENTIFY"); 2306 ret = ENOMEM; 2307 goto fail; 2308 } 2309 2310 if (cmd->nc_dma->nd_ncookie > 2) { 2311 dev_err(nvme->n_dip, CE_WARN, 2312 "!too many DMA cookies for IDENTIFY"); 2313 atomic_inc_32(&nvme->n_too_many_cookies); 2314 ret = ENOMEM; 2315 goto fail; 2316 } 2317 2318 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2319 if (cmd->nc_dma->nd_ncookie > 1) { 2320 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2321 &cmd->nc_dma->nd_cookie); 2322 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2323 cmd->nc_dma->nd_cookie.dmac_laddress; 2324 } 2325 2326 if (user) 2327 cmd->nc_dontpanic = B_TRUE; 2328 2329 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2330 2331 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2332 dev_err(nvme->n_dip, CE_WARN, 2333 "!IDENTIFY failed with sct = %x, sc = %x", 2334 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2335 goto fail; 2336 } 2337 2338 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 2339 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 2340 2341 fail: 2342 nvme_free_cmd(cmd); 2343 2344 return (ret); 2345 } 2346 2347 static int 2348 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2349 uint32_t val, uint32_t *res) 2350 { 2351 _NOTE(ARGUNUSED(nsid)); 2352 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2353 int ret = EINVAL; 2354 2355 ASSERT(res != NULL); 2356 2357 cmd->nc_sqid = 0; 2358 cmd->nc_callback = nvme_wakeup_cmd; 2359 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 2360 cmd->nc_sqe.sqe_cdw10 = feature; 2361 cmd->nc_sqe.sqe_cdw11 = val; 2362 2363 if (user) 2364 cmd->nc_dontpanic = B_TRUE; 2365 2366 switch (feature) { 2367 case NVME_FEAT_WRITE_CACHE: 2368 if (!nvme->n_write_cache_present) 2369 goto 
fail; 2370 break; 2371 2372 case NVME_FEAT_NQUEUES: 2373 break; 2374 2375 default: 2376 goto fail; 2377 } 2378 2379 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2380 2381 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2382 dev_err(nvme->n_dip, CE_WARN, 2383 "!SET FEATURES %d failed with sct = %x, sc = %x", 2384 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2385 cmd->nc_cqe.cqe_sf.sf_sc); 2386 goto fail; 2387 } 2388 2389 *res = cmd->nc_cqe.cqe_dw0; 2390 2391 fail: 2392 nvme_free_cmd(cmd); 2393 return (ret); 2394 } 2395 2396 static int 2397 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2398 uint32_t *res, void **buf, size_t *bufsize) 2399 { 2400 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2401 int ret = EINVAL; 2402 2403 ASSERT(res != NULL); 2404 2405 if (bufsize != NULL) 2406 *bufsize = 0; 2407 2408 cmd->nc_sqid = 0; 2409 cmd->nc_callback = nvme_wakeup_cmd; 2410 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES; 2411 cmd->nc_sqe.sqe_cdw10 = feature; 2412 cmd->nc_sqe.sqe_cdw11 = *res; 2413 2414 /* 2415 * For some of the optional features there doesn't seem to be a method 2416 * of detecting whether it is supported other than using it. This will 2417 * cause "Invalid Field in Command" error, which is normally considered 2418 * a programming error. Set the nc_dontpanic flag to override the panic 2419 * in nvme_check_generic_cmd_status(). 2420 */ 2421 switch (feature) { 2422 case NVME_FEAT_ARBITRATION: 2423 case NVME_FEAT_POWER_MGMT: 2424 case NVME_FEAT_TEMPERATURE: 2425 case NVME_FEAT_ERROR: 2426 case NVME_FEAT_NQUEUES: 2427 case NVME_FEAT_INTR_COAL: 2428 case NVME_FEAT_INTR_VECT: 2429 case NVME_FEAT_WRITE_ATOM: 2430 case NVME_FEAT_ASYNC_EVENT: 2431 break; 2432 2433 case NVME_FEAT_WRITE_CACHE: 2434 if (!nvme->n_write_cache_present) 2435 goto fail; 2436 break; 2437 2438 case NVME_FEAT_LBA_RANGE: 2439 if (!nvme->n_lba_range_supported) 2440 goto fail; 2441 2442 cmd->nc_dontpanic = B_TRUE; 2443 cmd->nc_sqe.sqe_nsid = nsid; 2444 ASSERT(bufsize != NULL); 2445 *bufsize = NVME_LBA_RANGE_BUFSIZE; 2446 break; 2447 2448 case NVME_FEAT_AUTO_PST: 2449 if (!nvme->n_auto_pst_supported) 2450 goto fail; 2451 2452 ASSERT(bufsize != NULL); 2453 *bufsize = NVME_AUTO_PST_BUFSIZE; 2454 break; 2455 2456 case NVME_FEAT_PROGRESS: 2457 if (!nvme->n_progress_supported) 2458 goto fail; 2459 2460 cmd->nc_dontpanic = B_TRUE; 2461 break; 2462 2463 default: 2464 goto fail; 2465 } 2466 2467 if (user) 2468 cmd->nc_dontpanic = B_TRUE; 2469 2470 if (bufsize != NULL && *bufsize != 0) { 2471 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, 2472 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2473 dev_err(nvme->n_dip, CE_WARN, 2474 "!nvme_zalloc_dma failed for GET FEATURES"); 2475 ret = ENOMEM; 2476 goto fail; 2477 } 2478 2479 if (cmd->nc_dma->nd_ncookie > 2) { 2480 dev_err(nvme->n_dip, CE_WARN, 2481 "!too many DMA cookies for GET FEATURES"); 2482 atomic_inc_32(&nvme->n_too_many_cookies); 2483 ret = ENOMEM; 2484 goto fail; 2485 } 2486 2487 cmd->nc_sqe.sqe_dptr.d_prp[0] = 2488 cmd->nc_dma->nd_cookie.dmac_laddress; 2489 if (cmd->nc_dma->nd_ncookie > 1) { 2490 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2491 &cmd->nc_dma->nd_cookie); 2492 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2493 cmd->nc_dma->nd_cookie.dmac_laddress; 2494 } 2495 } 2496 2497 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2498 2499 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2500 boolean_t known = B_TRUE; 2501 2502 /* Check if this is unsupported optional feature */ 2503 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2504 
cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) { 2505 switch (feature) { 2506 case NVME_FEAT_LBA_RANGE: 2507 nvme->n_lba_range_supported = B_FALSE; 2508 break; 2509 case NVME_FEAT_PROGRESS: 2510 nvme->n_progress_supported = B_FALSE; 2511 break; 2512 default: 2513 known = B_FALSE; 2514 break; 2515 } 2516 } else { 2517 known = B_FALSE; 2518 } 2519 2520 /* Report the error otherwise */ 2521 if (!known) { 2522 dev_err(nvme->n_dip, CE_WARN, 2523 "!GET FEATURES %d failed with sct = %x, sc = %x", 2524 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2525 cmd->nc_cqe.cqe_sf.sf_sc); 2526 } 2527 2528 goto fail; 2529 } 2530 2531 if (bufsize != NULL && *bufsize != 0) { 2532 ASSERT(buf != NULL); 2533 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2534 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2535 } 2536 2537 *res = cmd->nc_cqe.cqe_dw0; 2538 2539 fail: 2540 nvme_free_cmd(cmd); 2541 return (ret); 2542 } 2543 2544 static int 2545 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 2546 { 2547 nvme_write_cache_t nwc = { 0 }; 2548 2549 if (enable) 2550 nwc.b.wc_wce = 1; 2551 2552 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE, 2553 nwc.r, &nwc.r)); 2554 } 2555 2556 static int 2557 nvme_set_nqueues(nvme_t *nvme) 2558 { 2559 nvme_nqueues_t nq = { 0 }; 2560 int ret; 2561 2562 /* 2563 * The default is to allocate one completion queue per vector. 2564 */ 2565 if (nvme->n_completion_queues == -1) 2566 nvme->n_completion_queues = nvme->n_intr_cnt; 2567 2568 /* 2569 * There is no point in having more completion queues than 2570 * interrupt vectors. 2571 */ 2572 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2573 nvme->n_intr_cnt); 2574 2575 /* 2576 * The default is to use one submission queue per completion queue. 2577 */ 2578 if (nvme->n_submission_queues == -1) 2579 nvme->n_submission_queues = nvme->n_completion_queues; 2580 2581 /* 2582 * There is no point in having more completion queues than 2583 * submission queues. 2584 */ 2585 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2586 nvme->n_submission_queues); 2587 2588 ASSERT(nvme->n_submission_queues > 0); 2589 ASSERT(nvme->n_completion_queues > 0); 2590 2591 nq.b.nq_nsq = nvme->n_submission_queues - 1; 2592 nq.b.nq_ncq = nvme->n_completion_queues - 1; 2593 2594 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r, 2595 &nq.r); 2596 2597 if (ret == 0) { 2598 /* 2599 * Never use more than the requested number of queues. 
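 * Both the requested and the granted queue counts are 0's based
 * values, hence the - 1 when building the request above and the + 1
 * when clamping to what the controller actually granted below.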
2600 */ 2601 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 2602 nq.b.nq_nsq + 1); 2603 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2604 nq.b.nq_ncq + 1); 2605 } 2606 2607 return (ret); 2608 } 2609 2610 static int 2611 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 2612 { 2613 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2614 nvme_create_queue_dw10_t dw10 = { 0 }; 2615 nvme_create_cq_dw11_t c_dw11 = { 0 }; 2616 int ret; 2617 2618 dw10.b.q_qid = cq->ncq_id; 2619 dw10.b.q_qsize = cq->ncq_nentry - 1; 2620 2621 c_dw11.b.cq_pc = 1; 2622 c_dw11.b.cq_ien = 1; 2623 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 2624 2625 cmd->nc_sqid = 0; 2626 cmd->nc_callback = nvme_wakeup_cmd; 2627 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 2628 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2629 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 2630 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 2631 2632 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2633 2634 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2635 dev_err(nvme->n_dip, CE_WARN, 2636 "!CREATE CQUEUE failed with sct = %x, sc = %x", 2637 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2638 } 2639 2640 nvme_free_cmd(cmd); 2641 2642 return (ret); 2643 } 2644 2645 static int 2646 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 2647 { 2648 nvme_cq_t *cq = qp->nq_cq; 2649 nvme_cmd_t *cmd; 2650 nvme_create_queue_dw10_t dw10 = { 0 }; 2651 nvme_create_sq_dw11_t s_dw11 = { 0 }; 2652 int ret; 2653 2654 /* 2655 * It is possible to have more qpairs than completion queues, 2656 * and when the idx > ncq_id, that completion queue is shared 2657 * and has already been created. 2658 */ 2659 if (idx <= cq->ncq_id && 2660 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 2661 return (DDI_FAILURE); 2662 2663 dw10.b.q_qid = idx; 2664 dw10.b.q_qsize = qp->nq_nentry - 1; 2665 2666 s_dw11.b.sq_pc = 1; 2667 s_dw11.b.sq_cqid = cq->ncq_id; 2668 2669 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2670 cmd->nc_sqid = 0; 2671 cmd->nc_callback = nvme_wakeup_cmd; 2672 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2673 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2674 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2675 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2676 2677 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2678 2679 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2680 dev_err(nvme->n_dip, CE_WARN, 2681 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2682 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2683 } 2684 2685 nvme_free_cmd(cmd); 2686 2687 return (ret); 2688 } 2689 2690 static boolean_t 2691 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2692 { 2693 nvme_reg_csts_t csts; 2694 int i; 2695 2696 nvme_put32(nvme, NVME_REG_CC, 0); 2697 2698 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2699 if (csts.b.csts_rdy == 1) { 2700 nvme_put32(nvme, NVME_REG_CC, 0); 2701 2702 /* 2703 * The timeout value is from the Controller Capabilities 2704 * register (CAP.TO, section 3.1.1). This is the worst case 2705 * time to wait for CSTS.RDY to transition from 1 to 0 after 2706 * CC.EN transitions from 1 to 0. 2707 * 2708 * The timeout units are in 500 ms units, and we are delaying 2709 * in 50ms chunks, hence counting to n_timeout * 10. 2710 */ 2711 for (i = 0; i < nvme->n_timeout * 10; i++) { 2712 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2713 if (csts.b.csts_rdy == 0) 2714 break; 2715 2716 /* 2717 * Quiescing drivers should not use locks or timeouts, 2718 * so if this is the quiesce path, use a quiesce-safe 2719 * delay. 
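 * (drv_usecwait() busy-waits rather than sleeping, which is the only
 * safe way to delay in quiesce context)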
2720 */ 2721 if (quiesce) { 2722 drv_usecwait(50000); 2723 } else { 2724 delay(drv_usectohz(50000)); 2725 } 2726 } 2727 } 2728 2729 nvme_put32(nvme, NVME_REG_AQA, 0); 2730 nvme_put32(nvme, NVME_REG_ASQ, 0); 2731 nvme_put32(nvme, NVME_REG_ACQ, 0); 2732 2733 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2734 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE); 2735 } 2736 2737 static void 2738 nvme_shutdown(nvme_t *nvme, boolean_t quiesce) 2739 { 2740 nvme_reg_cc_t cc; 2741 nvme_reg_csts_t csts; 2742 int i; 2743 2744 cc.r = nvme_get32(nvme, NVME_REG_CC); 2745 cc.b.cc_shn = NVME_CC_SHN_NORMAL; 2746 nvme_put32(nvme, NVME_REG_CC, cc.r); 2747 2748 for (i = 0; i < 10; i++) { 2749 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2750 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 2751 break; 2752 2753 if (quiesce) { 2754 drv_usecwait(100000); 2755 } else { 2756 delay(drv_usectohz(100000)); 2757 } 2758 } 2759 } 2760 2761 /* 2762 * Return length of string without trailing spaces. 2763 */ 2764 static int 2765 nvme_strlen(const char *str, int len) 2766 { 2767 if (len <= 0) 2768 return (0); 2769 2770 while (str[--len] == ' ') 2771 ; 2772 2773 return (++len); 2774 } 2775 2776 static void 2777 nvme_config_min_block_size(nvme_t *nvme, char *model, char *val) 2778 { 2779 ulong_t bsize = 0; 2780 char *msg = ""; 2781 2782 if (ddi_strtoul(val, NULL, 0, &bsize) != 0) 2783 goto err; 2784 2785 if (!ISP2(bsize)) { 2786 msg = ": not a power of 2"; 2787 goto err; 2788 } 2789 2790 if (bsize < NVME_DEFAULT_MIN_BLOCK_SIZE) { 2791 msg = ": too low"; 2792 goto err; 2793 } 2794 2795 nvme->n_min_block_size = bsize; 2796 return; 2797 2798 err: 2799 dev_err(nvme->n_dip, CE_WARN, 2800 "!nvme-config-list: ignoring invalid min-phys-block-size '%s' " 2801 "for model '%s'%s", val, model, msg); 2802 2803 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 2804 } 2805 2806 static void 2807 nvme_config_boolean(nvme_t *nvme, char *model, char *name, char *val, 2808 boolean_t *b) 2809 { 2810 if (strcmp(val, "on") == 0 || 2811 strcmp(val, "true") == 0) 2812 *b = B_TRUE; 2813 else if (strcmp(val, "off") == 0 || 2814 strcmp(val, "false") == 0) 2815 *b = B_FALSE; 2816 else 2817 dev_err(nvme->n_dip, CE_WARN, 2818 "!nvme-config-list: invalid value for %s '%s'" 2819 " for model '%s', ignoring", name, val, model); 2820 } 2821 2822 static void 2823 nvme_config_list(nvme_t *nvme) 2824 { 2825 char **config_list; 2826 uint_t nelem; 2827 int rv, i; 2828 2829 /* 2830 * We're following the pattern of 'sd-config-list' here, but extend it. 2831 * Instead of two we have three separate strings for "model", "fwrev", 2832 * and "name-value-list". 
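 *
 * An illustrative driver.conf entry (model, firmware revisions and
 * values are made up) could look like this:
 *
 *   nvme-config-list =
 *       "EXAMPLE MODEL A",  "FW1.0,FW1.1",
 *           "min-phys-block-size:4096,volatile-write-cache:off",
 *       "EXAMPLE MODEL B",  "",
 *           "ignore-unknown-vendor-status:on";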
2833 */ 2834 rv = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, nvme->n_dip, 2835 DDI_PROP_DONTPASS, "nvme-config-list", &config_list, &nelem); 2836 2837 if (rv != DDI_PROP_SUCCESS) { 2838 if (rv == DDI_PROP_CANNOT_DECODE) { 2839 dev_err(nvme->n_dip, CE_WARN, 2840 "!nvme-config-list: cannot be decoded"); 2841 } 2842 2843 return; 2844 } 2845 2846 if ((nelem % 3) != 0) { 2847 dev_err(nvme->n_dip, CE_WARN, "!nvme-config-list: must be " 2848 "triplets of <model>/<fwrev>/<name-value-list> strings "); 2849 goto out; 2850 } 2851 2852 for (i = 0; i < nelem; i += 3) { 2853 char *model = config_list[i]; 2854 char *fwrev = config_list[i + 1]; 2855 char *nvp, *save_nv; 2856 int id_model_len, id_fwrev_len; 2857 2858 id_model_len = nvme_strlen(nvme->n_idctl->id_model, 2859 sizeof (nvme->n_idctl->id_model)); 2860 2861 if (strlen(model) != id_model_len) 2862 continue; 2863 2864 if (strncmp(model, nvme->n_idctl->id_model, id_model_len) != 0) 2865 continue; 2866 2867 id_fwrev_len = nvme_strlen(nvme->n_idctl->id_fwrev, 2868 sizeof (nvme->n_idctl->id_fwrev)); 2869 2870 if (strlen(fwrev) != 0) { 2871 boolean_t match = B_FALSE; 2872 char *fwr, *last_fw; 2873 2874 for (fwr = strtok_r(fwrev, ",", &last_fw); 2875 fwr != NULL; 2876 fwr = strtok_r(NULL, ",", &last_fw)) { 2877 if (strlen(fwr) != id_fwrev_len) 2878 continue; 2879 2880 if (strncmp(fwr, nvme->n_idctl->id_fwrev, 2881 id_fwrev_len) == 0) 2882 match = B_TRUE; 2883 } 2884 2885 if (!match) 2886 continue; 2887 } 2888 2889 /* 2890 * We should now have a comma-separated list of name:value 2891 * pairs. 2892 */ 2893 for (nvp = strtok_r(config_list[i + 2], ",", &save_nv); 2894 nvp != NULL; nvp = strtok_r(NULL, ",", &save_nv)) { 2895 char *name = nvp; 2896 char *val = strchr(nvp, ':'); 2897 2898 if (val == NULL || name == val) { 2899 dev_err(nvme->n_dip, CE_WARN, 2900 "!nvme-config-list: <name-value-list> " 2901 "for model '%s' is malformed", model); 2902 goto out; 2903 } 2904 2905 /* 2906 * Null-terminate 'name', move 'val' past ':' sep. 2907 */ 2908 *val++ = '\0'; 2909 2910 /* 2911 * Process the name:val pairs that we know about. 2912 */ 2913 if (strcmp(name, "ignore-unknown-vendor-status") == 0) { 2914 nvme_config_boolean(nvme, model, name, val, 2915 &nvme->n_ignore_unknown_vendor_status); 2916 } else if (strcmp(name, "min-phys-block-size") == 0) { 2917 nvme_config_min_block_size(nvme, model, val); 2918 } else if (strcmp(name, "volatile-write-cache") == 0) { 2919 nvme_config_boolean(nvme, model, name, val, 2920 &nvme->n_write_cache_enabled); 2921 } else { 2922 /* 2923 * Unknown 'name'. 2924 */ 2925 dev_err(nvme->n_dip, CE_WARN, 2926 "!nvme-config-list: unknown config '%s' " 2927 "for model '%s', ignoring", name, model); 2928 } 2929 } 2930 } 2931 2932 out: 2933 ddi_prop_free(config_list); 2934 } 2935 2936 static void 2937 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2938 { 2939 /* 2940 * Section 7.7 of the spec describes how to get a unique ID for 2941 * the controller: the vendor ID, the model name and the serial 2942 * number shall be unique when combined. 2943 * 2944 * If a namespace has no EUI64 we use the above and add the hex 2945 * namespace ID to get a unique ID for the namespace. 
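 *
 * The resulting devid has the form "<vid>-<model>-<serial>-<nsid>",
 * with the vendor ID and namespace ID rendered in hex; e.g. a (made
 * up) controller with vendor ID 0x1234 and namespace 1 would yield
 * "1234-<model>-<serial>-1".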
2946 */ 2947 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2948 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2949 2950 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2951 bcopy(nvme->n_idctl->id_serial, serial, 2952 sizeof (nvme->n_idctl->id_serial)); 2953 2954 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2955 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2956 2957 NVME_NSID2NS(nvme, nsid)->ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2958 nvme->n_idctl->id_vid, model, serial, nsid); 2959 } 2960 2961 static nvme_identify_nsid_list_t * 2962 nvme_update_nsid_list(nvme_t *nvme, int cns) 2963 { 2964 nvme_identify_nsid_list_t *nslist; 2965 2966 /* 2967 * We currently don't handle cases where there are more than 2968 * 1024 active namespaces, requiring several IDENTIFY commands. 2969 */ 2970 if (nvme_identify(nvme, B_FALSE, 0, cns, (void **)&nslist) == 0) 2971 return (nslist); 2972 2973 return (NULL); 2974 } 2975 2976 static boolean_t 2977 nvme_allocated_ns(nvme_namespace_t *ns) 2978 { 2979 nvme_t *nvme = ns->ns_nvme; 2980 uint32_t i; 2981 2982 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 2983 2984 /* 2985 * If supported, update the list of allocated namespace IDs. 2986 */ 2987 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) && 2988 nvme->n_idctl->id_oacs.oa_nsmgmt != 0) { 2989 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 2990 NVME_IDENTIFY_NSID_ALLOC_LIST); 2991 boolean_t found = B_FALSE; 2992 2993 /* 2994 * When namespace management is supported, this really shouldn't 2995 * be NULL. Treat all namespaces as allocated if it is. 2996 */ 2997 if (nslist == NULL) 2998 return (B_TRUE); 2999 3000 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3001 if (ns->ns_id == 0) 3002 break; 3003 3004 if (ns->ns_id == nslist->nl_nsid[i]) 3005 found = B_TRUE; 3006 } 3007 3008 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3009 return (found); 3010 } else { 3011 /* 3012 * If namespace management isn't supported, report all 3013 * namespaces as allocated. 3014 */ 3015 return (B_TRUE); 3016 } 3017 } 3018 3019 static boolean_t 3020 nvme_active_ns(nvme_namespace_t *ns) 3021 { 3022 nvme_t *nvme = ns->ns_nvme; 3023 uint64_t *ptr; 3024 uint32_t i; 3025 3026 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3027 3028 /* 3029 * If supported, update the list of active namespace IDs. 3030 */ 3031 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) { 3032 nvme_identify_nsid_list_t *nslist = nvme_update_nsid_list(nvme, 3033 NVME_IDENTIFY_NSID_LIST); 3034 boolean_t found = B_FALSE; 3035 3036 /* 3037 * When namespace management is supported, this really shouldn't 3038 * be NULL. Treat all namespaces as allocated if it is. 3039 */ 3040 if (nslist == NULL) 3041 return (B_TRUE); 3042 3043 for (i = 0; i < ARRAY_SIZE(nslist->nl_nsid); i++) { 3044 if (ns->ns_id == 0) 3045 break; 3046 3047 if (ns->ns_id == nslist->nl_nsid[i]) 3048 found = B_TRUE; 3049 } 3050 3051 kmem_free(nslist, NVME_IDENTIFY_BUFSIZE); 3052 return (found); 3053 } 3054 3055 /* 3056 * Workaround for revision 1.0: 3057 * Check whether the IDENTIFY NAMESPACE data is zero-filled. 
3058 */ 3059 for (ptr = (uint64_t *)ns->ns_idns; 3060 ptr != (uint64_t *)(ns->ns_idns + 1); 3061 ptr++) { 3062 if (*ptr != 0) { 3063 return (B_TRUE); 3064 } 3065 } 3066 3067 return (B_FALSE); 3068 } 3069 3070 static int 3071 nvme_init_ns(nvme_t *nvme, int nsid) 3072 { 3073 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 3074 nvme_identify_nsid_t *idns; 3075 boolean_t was_ignored; 3076 int last_rp; 3077 3078 ns->ns_nvme = nvme; 3079 3080 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3081 3082 if (nvme_identify(nvme, B_FALSE, nsid, NVME_IDENTIFY_NSID, 3083 (void **)&idns) != 0) { 3084 dev_err(nvme->n_dip, CE_WARN, 3085 "!failed to identify namespace %d", nsid); 3086 return (DDI_FAILURE); 3087 } 3088 3089 if (ns->ns_idns != NULL) 3090 kmem_free(ns->ns_idns, sizeof (nvme_identify_nsid_t)); 3091 3092 ns->ns_idns = idns; 3093 ns->ns_id = nsid; 3094 3095 was_ignored = ns->ns_ignore; 3096 3097 ns->ns_allocated = nvme_allocated_ns(ns); 3098 ns->ns_active = nvme_active_ns(ns); 3099 3100 ns->ns_block_count = idns->id_nsize; 3101 ns->ns_block_size = 3102 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 3103 ns->ns_best_block_size = ns->ns_block_size; 3104 3105 /* 3106 * Get the EUI64 if present. 3107 */ 3108 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3109 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 3110 3111 /* 3112 * Get the NGUID if present. 3113 */ 3114 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) 3115 bcopy(idns->id_nguid, ns->ns_nguid, sizeof (ns->ns_nguid)); 3116 3117 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 3118 if (*(uint64_t *)ns->ns_eui64 == 0) 3119 nvme_prepare_devid(nvme, ns->ns_id); 3120 3121 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%u", ns->ns_id); 3122 3123 /* 3124 * Find the LBA format with no metadata and the best relative 3125 * performance. A value of 3 means "degraded", 0 is best. 3126 */ 3127 last_rp = 3; 3128 for (int j = 0; j <= idns->id_nlbaf; j++) { 3129 if (idns->id_lbaf[j].lbaf_lbads == 0) 3130 break; 3131 if (idns->id_lbaf[j].lbaf_ms != 0) 3132 continue; 3133 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 3134 continue; 3135 last_rp = idns->id_lbaf[j].lbaf_rp; 3136 ns->ns_best_block_size = 3137 1 << idns->id_lbaf[j].lbaf_lbads; 3138 } 3139 3140 if (ns->ns_best_block_size < nvme->n_min_block_size) 3141 ns->ns_best_block_size = nvme->n_min_block_size; 3142 3143 was_ignored = ns->ns_ignore; 3144 3145 /* 3146 * We currently don't support namespaces that are inactive, or use 3147 * either: 3148 * - protection information 3149 * - illegal block size (< 512) 3150 */ 3151 if (!ns->ns_active) { 3152 ns->ns_ignore = B_TRUE; 3153 } else if (idns->id_dps.dp_pinfo) { 3154 dev_err(nvme->n_dip, CE_WARN, 3155 "!ignoring namespace %d, unsupported feature: " 3156 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 3157 ns->ns_ignore = B_TRUE; 3158 } else if (ns->ns_block_size < 512) { 3159 dev_err(nvme->n_dip, CE_WARN, 3160 "!ignoring namespace %d, unsupported block size %"PRIu64, 3161 nsid, (uint64_t)ns->ns_block_size); 3162 ns->ns_ignore = B_TRUE; 3163 } else { 3164 ns->ns_ignore = B_FALSE; 3165 } 3166 3167 /* 3168 * Keep a count of namespaces which are attachable. 3169 * See comments in nvme_bd_driveinfo() to understand its effect. 3170 */ 3171 if (was_ignored) { 3172 /* 3173 * Previously ignored, but now not. Count it. 3174 */ 3175 if (!ns->ns_ignore) 3176 nvme->n_namespaces_attachable++; 3177 } else { 3178 /* 3179 * Wasn't ignored previously, but now needs to be. 3180 * Discount it. 
3181 */ 3182 if (ns->ns_ignore) 3183 nvme->n_namespaces_attachable--; 3184 } 3185 3186 return (DDI_SUCCESS); 3187 } 3188 3189 static int 3190 nvme_attach_ns(nvme_t *nvme, int nsid) 3191 { 3192 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 3193 3194 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3195 3196 if (ns->ns_ignore) 3197 return (ENOTSUP); 3198 3199 if (ns->ns_bd_hdl == NULL) { 3200 bd_ops_t ops = nvme_bd_ops; 3201 3202 if (!nvme->n_idctl->id_oncs.on_dset_mgmt) 3203 ops.o_free_space = NULL; 3204 3205 ns->ns_bd_hdl = bd_alloc_handle(ns, &ops, &nvme->n_prp_dma_attr, 3206 KM_SLEEP); 3207 3208 if (ns->ns_bd_hdl == NULL) { 3209 dev_err(nvme->n_dip, CE_WARN, "!Failed to get blkdev " 3210 "handle for namespace id %d", nsid); 3211 return (EINVAL); 3212 } 3213 } 3214 3215 if (bd_attach_handle(nvme->n_dip, ns->ns_bd_hdl) != DDI_SUCCESS) 3216 return (EBUSY); 3217 3218 ns->ns_attached = B_TRUE; 3219 3220 return (0); 3221 } 3222 3223 static int 3224 nvme_detach_ns(nvme_t *nvme, int nsid) 3225 { 3226 nvme_namespace_t *ns = NVME_NSID2NS(nvme, nsid); 3227 int rv; 3228 3229 ASSERT(MUTEX_HELD(&nvme->n_mgmt_mutex)); 3230 3231 if (ns->ns_ignore || !ns->ns_attached) 3232 return (0); 3233 3234 ASSERT(ns->ns_bd_hdl != NULL); 3235 rv = bd_detach_handle(ns->ns_bd_hdl); 3236 if (rv != DDI_SUCCESS) 3237 return (EBUSY); 3238 else 3239 ns->ns_attached = B_FALSE; 3240 3241 return (0); 3242 } 3243 3244 static int 3245 nvme_init(nvme_t *nvme) 3246 { 3247 nvme_reg_cc_t cc = { 0 }; 3248 nvme_reg_aqa_t aqa = { 0 }; 3249 nvme_reg_asq_t asq = { 0 }; 3250 nvme_reg_acq_t acq = { 0 }; 3251 nvme_reg_cap_t cap; 3252 nvme_reg_vs_t vs; 3253 nvme_reg_csts_t csts; 3254 int i = 0; 3255 uint16_t nqueues; 3256 uint_t tq_threads; 3257 char model[sizeof (nvme->n_idctl->id_model) + 1]; 3258 char *vendor, *product; 3259 3260 /* Check controller version */ 3261 vs.r = nvme_get32(nvme, NVME_REG_VS); 3262 nvme->n_version.v_major = vs.b.vs_mjr; 3263 nvme->n_version.v_minor = vs.b.vs_mnr; 3264 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 3265 nvme->n_version.v_major, nvme->n_version.v_minor); 3266 3267 if (nvme->n_version.v_major > nvme_version_major) { 3268 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 3269 nvme_version_major); 3270 if (nvme->n_strict_version) 3271 goto fail; 3272 } 3273 3274 /* retrieve controller configuration */ 3275 cap.r = nvme_get64(nvme, NVME_REG_CAP); 3276 3277 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 3278 dev_err(nvme->n_dip, CE_WARN, 3279 "!NVM command set not supported by hardware"); 3280 goto fail; 3281 } 3282 3283 nvme->n_nssr_supported = cap.b.cap_nssrs; 3284 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 3285 nvme->n_timeout = cap.b.cap_to; 3286 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 3287 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 3288 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 3289 3290 /* 3291 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 3292 * the base page size of 4k (1<<12), so add 12 here to get the real 3293 * page size value. 3294 */ 3295 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 3296 cap.b.cap_mpsmax + 12); 3297 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 3298 3299 /* 3300 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 3301 */ 3302 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 3303 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3304 3305 /* 3306 * Set up PRP DMA to transfer 1 page-aligned page at a time. 
3307 * Maxxfer may be increased after we identified the controller limits. 3308 */ 3309 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 3310 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 3311 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 3312 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 3313 3314 /* 3315 * Reset controller if it's still in ready state. 3316 */ 3317 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 3318 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 3319 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3320 nvme->n_dead = B_TRUE; 3321 goto fail; 3322 } 3323 3324 /* 3325 * Create the cq array with one completion queue to be assigned 3326 * to the admin queue pair and a limited number of taskqs (4). 3327 */ 3328 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) != 3329 DDI_SUCCESS) { 3330 dev_err(nvme->n_dip, CE_WARN, 3331 "!failed to pre-allocate admin completion queue"); 3332 goto fail; 3333 } 3334 /* 3335 * Create the admin queue pair. 3336 */ 3337 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 3338 != DDI_SUCCESS) { 3339 dev_err(nvme->n_dip, CE_WARN, 3340 "!unable to allocate admin qpair"); 3341 goto fail; 3342 } 3343 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 3344 nvme->n_ioq[0] = nvme->n_adminq; 3345 3346 nvme->n_progress |= NVME_ADMIN_QUEUE; 3347 3348 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3349 "admin-queue-len", nvme->n_admin_queue_len); 3350 3351 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 3352 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 3353 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 3354 3355 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 3356 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 3357 3358 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 3359 nvme_put64(nvme, NVME_REG_ASQ, asq); 3360 nvme_put64(nvme, NVME_REG_ACQ, acq); 3361 3362 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 3363 cc.b.cc_css = 0; /* use NVM command set */ 3364 cc.b.cc_mps = nvme->n_pageshift - 12; 3365 cc.b.cc_shn = 0; /* no shutdown in progress */ 3366 cc.b.cc_en = 1; /* enable controller */ 3367 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 3368 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 3369 3370 nvme_put32(nvme, NVME_REG_CC, cc.r); 3371 3372 /* 3373 * Wait for the controller to become ready. 3374 */ 3375 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3376 if (csts.b.csts_rdy == 0) { 3377 for (i = 0; i != nvme->n_timeout * 10; i++) { 3378 delay(drv_usectohz(50000)); 3379 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 3380 3381 if (csts.b.csts_cfs == 1) { 3382 dev_err(nvme->n_dip, CE_WARN, 3383 "!controller fatal status at init"); 3384 ddi_fm_service_impact(nvme->n_dip, 3385 DDI_SERVICE_LOST); 3386 nvme->n_dead = B_TRUE; 3387 goto fail; 3388 } 3389 3390 if (csts.b.csts_rdy == 1) 3391 break; 3392 } 3393 } 3394 3395 if (csts.b.csts_rdy == 0) { 3396 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 3397 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 3398 nvme->n_dead = B_TRUE; 3399 goto fail; 3400 } 3401 3402 /* 3403 * Assume an abort command limit of 1. We'll destroy and re-init 3404 * that later when we know the true abort command limit. 3405 */ 3406 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 3407 3408 /* 3409 * Set up initial interrupt for admin queue. 
3410 */ 3411 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 3412 != DDI_SUCCESS) && 3413 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 3414 != DDI_SUCCESS) && 3415 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 3416 != DDI_SUCCESS)) { 3417 dev_err(nvme->n_dip, CE_WARN, 3418 "!failed to setup initial interrupt"); 3419 goto fail; 3420 } 3421 3422 /* 3423 * Post an asynchronous event command to catch errors. 3424 * We assume the asynchronous events are supported as required by 3425 * specification (Figure 40 in section 5 of NVMe 1.2). 3426 * However, since at least qemu does not follow the specification, 3427 * we need a mechanism to protect ourselves. 3428 */ 3429 nvme->n_async_event_supported = B_TRUE; 3430 nvme_async_event(nvme); 3431 3432 /* 3433 * Identify Controller 3434 */ 3435 if (nvme_identify(nvme, B_FALSE, 0, NVME_IDENTIFY_CTRL, 3436 (void **)&nvme->n_idctl) != 0) { 3437 dev_err(nvme->n_dip, CE_WARN, 3438 "!failed to identify controller"); 3439 goto fail; 3440 } 3441 3442 /* 3443 * Process nvme-config-list (if present) in nvme.conf. 3444 */ 3445 nvme_config_list(nvme); 3446 3447 /* 3448 * Get Vendor & Product ID 3449 */ 3450 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 3451 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 3452 sata_split_model(model, &vendor, &product); 3453 3454 if (vendor == NULL) 3455 nvme->n_vendor = strdup("NVMe"); 3456 else 3457 nvme->n_vendor = strdup(vendor); 3458 3459 nvme->n_product = strdup(product); 3460 3461 /* 3462 * Get controller limits. 3463 */ 3464 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 3465 MIN(nvme->n_admin_queue_len / 10, 3466 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 3467 3468 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3469 "async-event-limit", nvme->n_async_event_limit); 3470 3471 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 3472 3473 /* 3474 * Reinitialize the semaphore with the true abort command limit 3475 * supported by the hardware. It's not necessary to disable interrupts 3476 * as only command aborts use the semaphore, and no commands are 3477 * executed or aborted while we're here. 3478 */ 3479 sema_destroy(&nvme->n_abort_sema); 3480 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 3481 SEMA_DRIVER, NULL); 3482 3483 nvme->n_progress |= NVME_CTRL_LIMITS; 3484 3485 if (nvme->n_idctl->id_mdts == 0) 3486 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 3487 else 3488 nvme->n_max_data_transfer_size = 3489 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 3490 3491 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 3492 3493 /* 3494 * Limit n_max_data_transfer_size to what we can handle in one PRP. 3495 * Chained PRPs are currently unsupported. 3496 * 3497 * This is a no-op on hardware which doesn't support a transfer size 3498 * big enough to require chained PRPs. 3499 */ 3500 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 3501 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 3502 3503 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 3504 3505 /* 3506 * Make sure the minimum/maximum queue entry sizes are not 3507 * larger/smaller than the default. 
3508 */ 3509 3510 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 3511 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 3512 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 3513 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 3514 goto fail; 3515 3516 /* 3517 * Check for the presence of a Volatile Write Cache. If present, 3518 * enable or disable based on the value of the property 3519 * volatile-write-cache-enable (default is enabled). 3520 */ 3521 nvme->n_write_cache_present = 3522 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 3523 3524 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3525 "volatile-write-cache-present", 3526 nvme->n_write_cache_present ? 1 : 0); 3527 3528 if (!nvme->n_write_cache_present) { 3529 nvme->n_write_cache_enabled = B_FALSE; 3530 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 3531 != 0) { 3532 dev_err(nvme->n_dip, CE_WARN, 3533 "!failed to %sable volatile write cache", 3534 nvme->n_write_cache_enabled ? "en" : "dis"); 3535 /* 3536 * Assume the cache is (still) enabled. 3537 */ 3538 nvme->n_write_cache_enabled = B_TRUE; 3539 } 3540 3541 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 3542 "volatile-write-cache-enable", 3543 nvme->n_write_cache_enabled ? 1 : 0); 3544 3545 /* 3546 * Assume LBA Range Type feature is supported. If it isn't this 3547 * will be set to B_FALSE by nvme_get_features(). 3548 */ 3549 nvme->n_lba_range_supported = B_TRUE; 3550 3551 /* 3552 * Check support for Autonomous Power State Transition. 3553 */ 3554 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 3555 nvme->n_auto_pst_supported = 3556 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; 3557 3558 /* 3559 * Assume Software Progress Marker feature is supported. If it isn't 3560 * this will be set to B_FALSE by nvme_get_features(). 3561 */ 3562 nvme->n_progress_supported = B_TRUE; 3563 3564 /* 3565 * Get number of supported namespaces and allocate namespace array. 3566 */ 3567 nvme->n_namespace_count = nvme->n_idctl->id_nn; 3568 3569 if (nvme->n_namespace_count == 0) { 3570 dev_err(nvme->n_dip, CE_WARN, 3571 "!controllers without namespaces are not supported"); 3572 goto fail; 3573 } 3574 3575 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 3576 dev_err(nvme->n_dip, CE_WARN, 3577 "!too many namespaces: %d, limiting to %d\n", 3578 nvme->n_namespace_count, NVME_MINOR_MAX); 3579 nvme->n_namespace_count = NVME_MINOR_MAX; 3580 } 3581 3582 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 3583 nvme->n_namespace_count, KM_SLEEP); 3584 3585 /* 3586 * Try to set up MSI/MSI-X interrupts. 3587 */ 3588 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 3589 != 0) { 3590 nvme_release_interrupts(nvme); 3591 3592 nqueues = MIN(UINT16_MAX, ncpus); 3593 3594 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 3595 nqueues) != DDI_SUCCESS) && 3596 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 3597 nqueues) != DDI_SUCCESS)) { 3598 dev_err(nvme->n_dip, CE_WARN, 3599 "!failed to setup MSI/MSI-X interrupts"); 3600 goto fail; 3601 } 3602 } 3603 3604 /* 3605 * Create I/O queue pairs. 
3606 */ 3607 3608 if (nvme_set_nqueues(nvme) != 0) { 3609 dev_err(nvme->n_dip, CE_WARN, 3610 "!failed to set number of I/O queues to %d", 3611 nvme->n_intr_cnt); 3612 goto fail; 3613 } 3614 3615 /* 3616 * Reallocate I/O queue array 3617 */ 3618 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 3619 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 3620 (nvme->n_submission_queues + 1), KM_SLEEP); 3621 nvme->n_ioq[0] = nvme->n_adminq; 3622 3623 /* 3624 * There should always be at least as many submission queues 3625 * as completion queues. 3626 */ 3627 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 3628 3629 nvme->n_ioq_count = nvme->n_submission_queues; 3630 3631 nvme->n_io_squeue_len = 3632 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 3633 3634 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 3635 nvme->n_io_squeue_len); 3636 3637 /* 3638 * Pre-allocate completion queues. 3639 * When there are the same number of submission and completion 3640 * queues there is no value in having a larger completion 3641 * queue length. 3642 */ 3643 if (nvme->n_submission_queues == nvme->n_completion_queues) 3644 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 3645 nvme->n_io_squeue_len); 3646 3647 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 3648 nvme->n_max_queue_entries); 3649 3650 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len", 3651 nvme->n_io_cqueue_len); 3652 3653 /* 3654 * Assign the equal quantity of taskq threads to each completion 3655 * queue, capping the total number of threads to the number 3656 * of CPUs. 3657 */ 3658 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues; 3659 3660 /* 3661 * In case the calculation above is zero, we need at least one 3662 * thread per completion queue. 3663 */ 3664 tq_threads = MAX(1, tq_threads); 3665 3666 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1, 3667 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) { 3668 dev_err(nvme->n_dip, CE_WARN, 3669 "!failed to pre-allocate completion queues"); 3670 goto fail; 3671 } 3672 3673 /* 3674 * If we use less completion queues than interrupt vectors return 3675 * some of the interrupt vectors back to the system. 3676 */ 3677 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) { 3678 nvme_release_interrupts(nvme); 3679 3680 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 3681 nvme->n_completion_queues + 1) != DDI_SUCCESS) { 3682 dev_err(nvme->n_dip, CE_WARN, 3683 "!failed to reduce number of interrupts"); 3684 goto fail; 3685 } 3686 } 3687 3688 /* 3689 * Alloc & register I/O queue pairs 3690 */ 3691 3692 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3693 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len, 3694 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 3695 dev_err(nvme->n_dip, CE_WARN, 3696 "!unable to allocate I/O qpair %d", i); 3697 goto fail; 3698 } 3699 3700 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { 3701 dev_err(nvme->n_dip, CE_WARN, 3702 "!unable to create I/O qpair %d", i); 3703 goto fail; 3704 } 3705 } 3706 3707 /* 3708 * Post more asynchronous events commands to reduce event reporting 3709 * latency as suggested by the spec. 
3710 */ 3711 if (nvme->n_async_event_supported) { 3712 for (i = 1; i != nvme->n_async_event_limit; i++) 3713 nvme_async_event(nvme); 3714 } 3715 3716 return (DDI_SUCCESS); 3717 3718 fail: 3719 (void) nvme_reset(nvme, B_FALSE); 3720 return (DDI_FAILURE); 3721 } 3722 3723 static uint_t 3724 nvme_intr(caddr_t arg1, caddr_t arg2) 3725 { 3726 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 3727 nvme_t *nvme = (nvme_t *)arg1; 3728 int inum = (int)(uintptr_t)arg2; 3729 int ccnt = 0; 3730 int qnum; 3731 3732 if (inum >= nvme->n_intr_cnt) 3733 return (DDI_INTR_UNCLAIMED); 3734 3735 if (nvme->n_dead) 3736 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? 3737 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED); 3738 3739 /* 3740 * The interrupt vector a queue uses is calculated as queue_idx % 3741 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 3742 * in steps of n_intr_cnt to process all queues using this vector. 3743 */ 3744 for (qnum = inum; 3745 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL; 3746 qnum += nvme->n_intr_cnt) { 3747 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]); 3748 } 3749 3750 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 3751 } 3752 3753 static void 3754 nvme_release_interrupts(nvme_t *nvme) 3755 { 3756 int i; 3757 3758 for (i = 0; i < nvme->n_intr_cnt; i++) { 3759 if (nvme->n_inth[i] == NULL) 3760 break; 3761 3762 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3763 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 3764 else 3765 (void) ddi_intr_disable(nvme->n_inth[i]); 3766 3767 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 3768 (void) ddi_intr_free(nvme->n_inth[i]); 3769 } 3770 3771 kmem_free(nvme->n_inth, nvme->n_inth_sz); 3772 nvme->n_inth = NULL; 3773 nvme->n_inth_sz = 0; 3774 3775 nvme->n_progress &= ~NVME_INTERRUPTS; 3776 } 3777 3778 static int 3779 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 3780 { 3781 int nintrs, navail, count; 3782 int ret; 3783 int i; 3784 3785 if (nvme->n_intr_types == 0) { 3786 ret = ddi_intr_get_supported_types(nvme->n_dip, 3787 &nvme->n_intr_types); 3788 if (ret != DDI_SUCCESS) { 3789 dev_err(nvme->n_dip, CE_WARN, 3790 "!%s: ddi_intr_get_supported types failed", 3791 __func__); 3792 return (ret); 3793 } 3794 #ifdef __x86 3795 if (get_hwenv() == HW_VMWARE) 3796 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 3797 #endif 3798 } 3799 3800 if ((nvme->n_intr_types & intr_type) == 0) 3801 return (DDI_FAILURE); 3802 3803 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 3804 if (ret != DDI_SUCCESS) { 3805 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 3806 __func__); 3807 return (ret); 3808 } 3809 3810 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 3811 if (ret != DDI_SUCCESS) { 3812 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 3813 __func__); 3814 return (ret); 3815 } 3816 3817 /* We want at most one interrupt per queue pair. 
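 * Each completion queue is serviced by exactly one vector (queue
 * index modulo the vector count, see nvme_intr() above), so any
 * vectors beyond the number of queue pairs would never be used; trim
 * navail rather than allocating handles we cannot service.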
*/ 3818 if (navail > nqpairs) 3819 navail = nqpairs; 3820 3821 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 3822 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 3823 3824 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 3825 &count, 0); 3826 if (ret != DDI_SUCCESS) { 3827 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 3828 __func__); 3829 goto fail; 3830 } 3831 3832 nvme->n_intr_cnt = count; 3833 3834 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 3835 if (ret != DDI_SUCCESS) { 3836 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 3837 __func__); 3838 goto fail; 3839 } 3840 3841 for (i = 0; i < count; i++) { 3842 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 3843 (void *)nvme, (void *)(uintptr_t)i); 3844 if (ret != DDI_SUCCESS) { 3845 dev_err(nvme->n_dip, CE_WARN, 3846 "!%s: ddi_intr_add_handler failed", __func__); 3847 goto fail; 3848 } 3849 } 3850 3851 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 3852 3853 for (i = 0; i < count; i++) { 3854 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3855 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 3856 else 3857 ret = ddi_intr_enable(nvme->n_inth[i]); 3858 3859 if (ret != DDI_SUCCESS) { 3860 dev_err(nvme->n_dip, CE_WARN, 3861 "!%s: enabling interrupt %d failed", __func__, i); 3862 goto fail; 3863 } 3864 } 3865 3866 nvme->n_intr_type = intr_type; 3867 3868 nvme->n_progress |= NVME_INTERRUPTS; 3869 3870 return (DDI_SUCCESS); 3871 3872 fail: 3873 nvme_release_interrupts(nvme); 3874 3875 return (ret); 3876 } 3877 3878 static int 3879 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 3880 { 3881 _NOTE(ARGUNUSED(arg)); 3882 3883 pci_ereport_post(dip, fm_error, NULL); 3884 return (fm_error->fme_status); 3885 } 3886 3887 static void 3888 nvme_remove_callback(dev_info_t *dip, ddi_eventcookie_t cookie, void *a, 3889 void *b) 3890 { 3891 nvme_t *nvme = a; 3892 3893 nvme->n_dead = B_TRUE; 3894 3895 /* 3896 * Fail all outstanding commands, including those in the admin queue 3897 * (queue 0). 3898 */ 3899 for (uint_t i = 0; i < nvme->n_ioq_count + 1; i++) { 3900 nvme_qpair_t *qp = nvme->n_ioq[i]; 3901 3902 mutex_enter(&qp->nq_mutex); 3903 for (size_t j = 0; j < qp->nq_nentry; j++) { 3904 nvme_cmd_t *cmd = qp->nq_cmd[j]; 3905 nvme_cmd_t *u_cmd; 3906 3907 if (cmd == NULL) { 3908 continue; 3909 } 3910 3911 /* 3912 * Since we have the queue lock held the entire time we 3913 * iterate over it, it's not possible for the queue to 3914 * change underneath us. Thus, we don't need to check 3915 * that the return value of nvme_unqueue_cmd matches the 3916 * requested cmd to unqueue. 3917 */ 3918 u_cmd = nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid); 3919 taskq_dispatch_ent(qp->nq_cq->ncq_cmd_taskq, 3920 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 3921 3922 ASSERT3P(u_cmd, ==, cmd); 3923 } 3924 mutex_exit(&qp->nq_mutex); 3925 } 3926 } 3927 3928 static int 3929 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3930 { 3931 nvme_t *nvme; 3932 int instance; 3933 int nregs; 3934 off_t regsize; 3935 int i; 3936 char name[32]; 3937 boolean_t attached_ns; 3938 3939 if (cmd != DDI_ATTACH) 3940 return (DDI_FAILURE); 3941 3942 instance = ddi_get_instance(dip); 3943 3944 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 3945 return (DDI_FAILURE); 3946 3947 nvme = ddi_get_soft_state(nvme_state, instance); 3948 ddi_set_driver_private(dip, nvme); 3949 nvme->n_dip = dip; 3950 3951 /* 3952 * Set up event handlers for hot removal. 
While npe(4D) supports the hot 3953 * removal event being injected for devices, the same is not true of all 3954 * of our possible parents (i.e. pci(4D) as of this writing). The most 3955 * common case this shows up is in some virtualization environments. We 3956 * should treat this as non-fatal so that way devices work but leave 3957 * this set up in such a way that if a nexus does grow support for this 3958 * we're good to go. 3959 */ 3960 if (ddi_get_eventcookie(nvme->n_dip, DDI_DEVI_REMOVE_EVENT, 3961 &nvme->n_rm_cookie) == DDI_SUCCESS) { 3962 if (ddi_add_event_handler(nvme->n_dip, nvme->n_rm_cookie, 3963 nvme_remove_callback, nvme, &nvme->n_ev_rm_cb_id) != 3964 DDI_SUCCESS) { 3965 goto fail; 3966 } 3967 } else { 3968 nvme->n_ev_rm_cb_id = NULL; 3969 } 3970 3971 mutex_init(&nvme->n_minor_mutex, NULL, MUTEX_DRIVER, NULL); 3972 nvme->n_progress |= NVME_MUTEX_INIT; 3973 3974 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3975 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 3976 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 3977 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 3978 B_TRUE : B_FALSE; 3979 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3980 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 3981 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3982 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 3983 /* 3984 * Double up the default for completion queues in case of 3985 * queue sharing. 3986 */ 3987 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3988 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 3989 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3990 DDI_PROP_DONTPASS, "async-event-limit", 3991 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 3992 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3993 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 3994 B_TRUE : B_FALSE; 3995 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3996 DDI_PROP_DONTPASS, "min-phys-block-size", 3997 NVME_DEFAULT_MIN_BLOCK_SIZE); 3998 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3999 DDI_PROP_DONTPASS, "max-submission-queues", -1); 4000 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 4001 DDI_PROP_DONTPASS, "max-completion-queues", -1); 4002 4003 if (!ISP2(nvme->n_min_block_size) || 4004 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 4005 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 4006 "using default %d", ISP2(nvme->n_min_block_size) ? 4007 "too low" : "not a power of 2", 4008 NVME_DEFAULT_MIN_BLOCK_SIZE); 4009 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 4010 } 4011 4012 if (nvme->n_submission_queues != -1 && 4013 (nvme->n_submission_queues < 1 || 4014 nvme->n_submission_queues > UINT16_MAX)) { 4015 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 4016 "valid. Must be [1..%d]", nvme->n_submission_queues, 4017 UINT16_MAX); 4018 nvme->n_submission_queues = -1; 4019 } 4020 4021 if (nvme->n_completion_queues != -1 && 4022 (nvme->n_completion_queues < 1 || 4023 nvme->n_completion_queues > UINT16_MAX)) { 4024 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 4025 "valid. 
Must be [1..%d]", nvme->n_completion_queues, 4026 UINT16_MAX); 4027 nvme->n_completion_queues = -1; 4028 } 4029 4030 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 4031 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 4032 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 4033 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 4034 4035 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 4036 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 4037 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 4038 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 4039 4040 if (nvme->n_async_event_limit < 1) 4041 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 4042 4043 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 4044 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 4045 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 4046 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 4047 4048 /* 4049 * Set up FMA support. 4050 */ 4051 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 4052 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 4053 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 4054 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 4055 4056 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 4057 4058 if (nvme->n_fm_cap) { 4059 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 4060 nvme->n_reg_acc_attr.devacc_attr_access = 4061 DDI_FLAGERR_ACC; 4062 4063 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 4064 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4065 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 4066 } 4067 4068 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4069 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4070 pci_ereport_setup(dip); 4071 4072 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4073 ddi_fm_handler_register(dip, nvme_fm_errcb, 4074 (void *)nvme); 4075 } 4076 4077 nvme->n_progress |= NVME_FMA_INIT; 4078 4079 /* 4080 * The spec defines several register sets. Only the controller 4081 * registers (set 1) are currently used. 4082 */ 4083 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 4084 nregs < 2 || 4085 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 4086 goto fail; 4087 4088 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 4089 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 4090 dev_err(dip, CE_WARN, "!failed to map regset 1"); 4091 goto fail; 4092 } 4093 4094 nvme->n_progress |= NVME_REGS_MAPPED; 4095 4096 /* 4097 * Create PRP DMA cache 4098 */ 4099 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 4100 ddi_driver_name(dip), ddi_get_instance(dip)); 4101 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 4102 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 4103 NULL, (void *)nvme, NULL, 0); 4104 4105 if (nvme_init(nvme) != DDI_SUCCESS) 4106 goto fail; 4107 4108 /* 4109 * Initialize the driver with the UFM subsystem 4110 */ 4111 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 4112 &nvme->n_ufmh, nvme) != 0) { 4113 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 4114 goto fail; 4115 } 4116 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 4117 ddi_ufm_update(nvme->n_ufmh); 4118 nvme->n_progress |= NVME_UFM_INIT; 4119 4120 mutex_init(&nvme->n_mgmt_mutex, NULL, MUTEX_DRIVER, NULL); 4121 nvme->n_progress |= NVME_MGMT_INIT; 4122 4123 /* 4124 * Identify namespaces. 4125 */ 4126 mutex_enter(&nvme->n_mgmt_mutex); 4127 4128 for (i = 1; i <= nvme->n_namespace_count; i++) { 4129 nvme_namespace_t *ns = NVME_NSID2NS(nvme, i); 4130 4131 /* 4132 * Namespaces start out ignored. 
When nvme_init_ns() checks 4133 * their properties and finds they can be used, it will set 4134 * ns_ignore to B_FALSE. It will also use this state change 4135 * to keep an accurate count of attachable namespaces. 4136 */ 4137 ns->ns_ignore = B_TRUE; 4138 if (nvme_init_ns(nvme, i) != 0) { 4139 mutex_exit(&nvme->n_mgmt_mutex); 4140 goto fail; 4141 } 4142 4143 if (ddi_create_minor_node(nvme->n_dip, ns->ns_name, S_IFCHR, 4144 NVME_MINOR(ddi_get_instance(nvme->n_dip), i), 4145 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 4146 mutex_exit(&nvme->n_mgmt_mutex); 4147 dev_err(dip, CE_WARN, 4148 "!failed to create minor node for namespace %d", i); 4149 goto fail; 4150 } 4151 } 4152 4153 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 4154 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 4155 != DDI_SUCCESS) { 4156 mutex_exit(&nvme->n_mgmt_mutex); 4157 dev_err(dip, CE_WARN, "nvme_attach: " 4158 "cannot create devctl minor node"); 4159 goto fail; 4160 } 4161 4162 attached_ns = B_FALSE; 4163 for (i = 1; i <= nvme->n_namespace_count; i++) { 4164 int rv; 4165 4166 rv = nvme_attach_ns(nvme, i); 4167 if (rv == 0) { 4168 attached_ns = B_TRUE; 4169 } else if (rv != ENOTSUP) { 4170 dev_err(nvme->n_dip, CE_WARN, 4171 "!failed to attach namespace %d: %d", i, rv); 4172 /* 4173 * Once we have successfully attached a namespace we 4174 * can no longer fail the driver attach as there is now 4175 * a blkdev child node linked to this device, and 4176 * our node is not yet in the attached state. 4177 */ 4178 if (!attached_ns) { 4179 mutex_exit(&nvme->n_mgmt_mutex); 4180 goto fail; 4181 } 4182 } 4183 } 4184 4185 mutex_exit(&nvme->n_mgmt_mutex); 4186 4187 return (DDI_SUCCESS); 4188 4189 fail: 4190 /* attach successful anyway so that FMA can retire the device */ 4191 if (nvme->n_dead) 4192 return (DDI_SUCCESS); 4193 4194 (void) nvme_detach(dip, DDI_DETACH); 4195 4196 return (DDI_FAILURE); 4197 } 4198 4199 static int 4200 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 4201 { 4202 int instance, i; 4203 nvme_t *nvme; 4204 4205 if (cmd != DDI_DETACH) 4206 return (DDI_FAILURE); 4207 4208 instance = ddi_get_instance(dip); 4209 4210 nvme = ddi_get_soft_state(nvme_state, instance); 4211 4212 if (nvme == NULL) 4213 return (DDI_FAILURE); 4214 4215 /* 4216 * Remove all minor nodes from the device regardless of the source in 4217 * one swoop. 4218 */ 4219 ddi_remove_minor_node(dip, NULL); 4220 4221 /* 4222 * We need to remove the event handler as one of the first things that 4223 * we do. If we proceed with other teardown without removing the event 4224 * handler, we could end up in a very unfortunate race with ourselves. 4225 * The DDI does not serialize these with detach (just like timeout(9F) 4226 * and others). 
4227 */ 4228 if (nvme->n_ev_rm_cb_id != NULL) { 4229 (void) ddi_remove_event_handler(nvme->n_ev_rm_cb_id); 4230 } 4231 nvme->n_ev_rm_cb_id = NULL; 4232 4233 if (nvme->n_ns) { 4234 for (i = 1; i <= nvme->n_namespace_count; i++) { 4235 nvme_namespace_t *ns = NVME_NSID2NS(nvme, i); 4236 4237 if (ns->ns_bd_hdl) { 4238 (void) bd_detach_handle(ns->ns_bd_hdl); 4239 bd_free_handle(ns->ns_bd_hdl); 4240 } 4241 4242 if (ns->ns_idns) 4243 kmem_free(ns->ns_idns, 4244 sizeof (nvme_identify_nsid_t)); 4245 if (ns->ns_devid) 4246 strfree(ns->ns_devid); 4247 } 4248 4249 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 4250 nvme->n_namespace_count); 4251 } 4252 4253 if (nvme->n_progress & NVME_MGMT_INIT) { 4254 mutex_destroy(&nvme->n_mgmt_mutex); 4255 } 4256 4257 if (nvme->n_progress & NVME_UFM_INIT) { 4258 ddi_ufm_fini(nvme->n_ufmh); 4259 mutex_destroy(&nvme->n_fwslot_mutex); 4260 } 4261 4262 if (nvme->n_progress & NVME_INTERRUPTS) 4263 nvme_release_interrupts(nvme); 4264 4265 for (i = 0; i < nvme->n_cq_count; i++) { 4266 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL) 4267 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq); 4268 } 4269 4270 if (nvme->n_progress & NVME_MUTEX_INIT) { 4271 mutex_destroy(&nvme->n_minor_mutex); 4272 } 4273 4274 if (nvme->n_ioq_count > 0) { 4275 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 4276 if (nvme->n_ioq[i] != NULL) { 4277 /* TODO: send destroy queue commands */ 4278 nvme_free_qpair(nvme->n_ioq[i]); 4279 } 4280 } 4281 4282 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 4283 (nvme->n_ioq_count + 1)); 4284 } 4285 4286 if (nvme->n_prp_cache != NULL) { 4287 kmem_cache_destroy(nvme->n_prp_cache); 4288 } 4289 4290 if (nvme->n_progress & NVME_REGS_MAPPED) { 4291 nvme_shutdown(nvme, B_FALSE); 4292 (void) nvme_reset(nvme, B_FALSE); 4293 } 4294 4295 if (nvme->n_progress & NVME_CTRL_LIMITS) 4296 sema_destroy(&nvme->n_abort_sema); 4297 4298 if (nvme->n_progress & NVME_ADMIN_QUEUE) 4299 nvme_free_qpair(nvme->n_adminq); 4300 4301 if (nvme->n_cq_count > 0) { 4302 nvme_destroy_cq_array(nvme, 0); 4303 nvme->n_cq = NULL; 4304 nvme->n_cq_count = 0; 4305 } 4306 4307 if (nvme->n_idctl) 4308 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 4309 4310 if (nvme->n_progress & NVME_REGS_MAPPED) 4311 ddi_regs_map_free(&nvme->n_regh); 4312 4313 if (nvme->n_progress & NVME_FMA_INIT) { 4314 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4315 ddi_fm_handler_unregister(nvme->n_dip); 4316 4317 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 4318 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 4319 pci_ereport_teardown(nvme->n_dip); 4320 4321 ddi_fm_fini(nvme->n_dip); 4322 } 4323 4324 if (nvme->n_vendor != NULL) 4325 strfree(nvme->n_vendor); 4326 4327 if (nvme->n_product != NULL) 4328 strfree(nvme->n_product); 4329 4330 ddi_soft_state_free(nvme_state, instance); 4331 4332 return (DDI_SUCCESS); 4333 } 4334 4335 static int 4336 nvme_quiesce(dev_info_t *dip) 4337 { 4338 int instance; 4339 nvme_t *nvme; 4340 4341 instance = ddi_get_instance(dip); 4342 4343 nvme = ddi_get_soft_state(nvme_state, instance); 4344 4345 if (nvme == NULL) 4346 return (DDI_FAILURE); 4347 4348 nvme_shutdown(nvme, B_TRUE); 4349 4350 (void) nvme_reset(nvme, B_TRUE); 4351 4352 return (DDI_SUCCESS); 4353 } 4354 4355 static int 4356 nvme_fill_prp(nvme_cmd_t *cmd, ddi_dma_handle_t dma) 4357 { 4358 nvme_t *nvme = cmd->nc_nvme; 4359 uint_t nprp_per_page, nprp; 4360 uint64_t *prp; 4361 const ddi_dma_cookie_t *cookie; 4362 uint_t idx; 4363 uint_t ncookies = ddi_dma_ncookies(dma); 4364 4365 if (ncookies == 0) 4366 return (DDI_FAILURE); 4367 4368 if ((cookie = ddi_dma_cookie_get(dma, 0)) 
== NULL) 4369 return (DDI_FAILURE); 4370 cmd->nc_sqe.sqe_dptr.d_prp[0] = cookie->dmac_laddress; 4371 4372 if (ncookies == 1) { 4373 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 4374 return (DDI_SUCCESS); 4375 } else if (ncookies == 2) { 4376 if ((cookie = ddi_dma_cookie_get(dma, 1)) == NULL) 4377 return (DDI_FAILURE); 4378 cmd->nc_sqe.sqe_dptr.d_prp[1] = cookie->dmac_laddress; 4379 return (DDI_SUCCESS); 4380 } 4381 4382 /* 4383 * At this point, we're always operating on cookies at 4384 * index >= 1 and writing the addresses of those cookies 4385 * into a new page. The address of that page is stored 4386 * as the second PRP entry. 4387 */ 4388 nprp_per_page = nvme->n_pagesize / sizeof (uint64_t); 4389 ASSERT(nprp_per_page > 0); 4390 4391 /* 4392 * We currently don't support chained PRPs and set up our DMA 4393 * attributes to reflect that. If we still get an I/O request 4394 * that needs a chained PRP something is very wrong. Account 4395 * for the first cookie here, which we've placed in d_prp[0]. 4396 */ 4397 nprp = howmany(ncookies - 1, nprp_per_page); 4398 VERIFY(nprp == 1); 4399 4400 /* 4401 * Allocate a page of pointers, in which we'll write the 4402 * addresses of cookies 1 to `ncookies`. 4403 */ 4404 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 4405 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 4406 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_prp->nd_cookie.dmac_laddress; 4407 4408 prp = (uint64_t *)cmd->nc_prp->nd_memp; 4409 for (idx = 1; idx < ncookies; idx++) { 4410 if ((cookie = ddi_dma_cookie_get(dma, idx)) == NULL) 4411 return (DDI_FAILURE); 4412 *prp++ = cookie->dmac_laddress; 4413 } 4414 4415 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 4416 DDI_DMA_SYNC_FORDEV); 4417 return (DDI_SUCCESS); 4418 } 4419 4420 /* 4421 * The maximum number of requests supported for a deallocate request is 4422 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and 4423 * unchanged through at least 1.4a). The definition of nvme_range_t is also 4424 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for 4425 * a deallocate request will fit into the smallest supported namespace page 4426 * (4k). 4427 */ 4428 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096); 4429 4430 static int 4431 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize, 4432 int allocflag) 4433 { 4434 const dkioc_free_list_t *dfl = xfer->x_dfl; 4435 const dkioc_free_list_ext_t *exts = dfl->dfl_exts; 4436 nvme_t *nvme = cmd->nc_nvme; 4437 nvme_range_t *ranges = NULL; 4438 uint_t i; 4439 4440 /* 4441 * The number of ranges in the request is 0s based (that is 4442 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ..., 4443 * word10 == 255 -> 256 ranges). Therefore the allowed values are 4444 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request, 4445 * we either provided bad info in nvme_bd_driveinfo() or there is a bug 4446 * in blkdev. 
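 *
 * As a worked example (illustrative only) of the per-extent
 * translation done below: on a namespace with 4096-byte blocks, an
 * extent starting at byte offset 1 MiB with a length of 128 KiB is
 * encoded as the range { nr_ctxattr = 0, nr_len = 32, nr_lba = 256 }.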
4447 */ 4448 VERIFY3U(dfl->dfl_num_exts, >, 0); 4449 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES); 4450 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff; 4451 4452 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE; 4453 4454 cmd->nc_prp = kmem_cache_alloc(nvme->n_prp_cache, allocflag); 4455 if (cmd->nc_prp == NULL) 4456 return (DDI_FAILURE); 4457 4458 bzero(cmd->nc_prp->nd_memp, cmd->nc_prp->nd_len); 4459 ranges = (nvme_range_t *)cmd->nc_prp->nd_memp; 4460 4461 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_prp->nd_cookie.dmac_laddress; 4462 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 4463 4464 for (i = 0; i < dfl->dfl_num_exts; i++) { 4465 uint64_t lba, len; 4466 4467 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize; 4468 len = exts[i].dfle_length / blocksize; 4469 4470 VERIFY3U(len, <=, UINT32_MAX); 4471 4472 /* No context attributes for a deallocate request */ 4473 ranges[i].nr_ctxattr = 0; 4474 ranges[i].nr_len = len; 4475 ranges[i].nr_lba = lba; 4476 } 4477 4478 (void) ddi_dma_sync(cmd->nc_prp->nd_dmah, 0, cmd->nc_prp->nd_len, 4479 DDI_DMA_SYNC_FORDEV); 4480 4481 return (DDI_SUCCESS); 4482 } 4483 4484 static nvme_cmd_t * 4485 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 4486 { 4487 nvme_t *nvme = ns->ns_nvme; 4488 nvme_cmd_t *cmd; 4489 int allocflag; 4490 4491 /* 4492 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 4493 */ 4494 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP; 4495 cmd = nvme_alloc_cmd(nvme, allocflag); 4496 4497 if (cmd == NULL) 4498 return (NULL); 4499 4500 cmd->nc_sqe.sqe_opc = opc; 4501 cmd->nc_callback = nvme_bd_xfer_done; 4502 cmd->nc_xfer = xfer; 4503 4504 switch (opc) { 4505 case NVME_OPC_NVM_WRITE: 4506 case NVME_OPC_NVM_READ: 4507 VERIFY(xfer->x_nblks <= 0x10000); 4508 4509 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4510 4511 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 4512 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 4513 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 4514 4515 if (nvme_fill_prp(cmd, xfer->x_dmah) != DDI_SUCCESS) 4516 goto fail; 4517 break; 4518 4519 case NVME_OPC_NVM_FLUSH: 4520 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4521 break; 4522 4523 case NVME_OPC_NVM_DSET_MGMT: 4524 cmd->nc_sqe.sqe_nsid = ns->ns_id; 4525 4526 if (nvme_fill_ranges(cmd, xfer, 4527 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS) 4528 goto fail; 4529 break; 4530 4531 default: 4532 goto fail; 4533 } 4534 4535 return (cmd); 4536 4537 fail: 4538 nvme_free_cmd(cmd); 4539 return (NULL); 4540 } 4541 4542 static void 4543 nvme_bd_xfer_done(void *arg) 4544 { 4545 nvme_cmd_t *cmd = arg; 4546 bd_xfer_t *xfer = cmd->nc_xfer; 4547 int error = 0; 4548 4549 error = nvme_check_cmd_status(cmd); 4550 nvme_free_cmd(cmd); 4551 4552 bd_xfer_done(xfer, error); 4553 } 4554 4555 static void 4556 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 4557 { 4558 nvme_namespace_t *ns = arg; 4559 nvme_t *nvme = ns->ns_nvme; 4560 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable); 4561 boolean_t mutex_exit_needed = B_TRUE; 4562 4563 /* 4564 * nvme_bd_driveinfo is called by blkdev in two situations: 4565 * - during bd_attach_handle(), which we call with the mutex held 4566 * - during bd_attach(), which may be called with or without the 4567 * mutex held 4568 */ 4569 if (mutex_owned(&nvme->n_mgmt_mutex)) 4570 mutex_exit_needed = B_FALSE; 4571 else 4572 mutex_enter(&nvme->n_mgmt_mutex); 4573 4574 /* 4575 * Set the blkdev qcount to the number of submission queues. 
4576 * It will then create one waitq/runq pair for each submission 4577 * queue and spread I/O requests across the queues. 4578 */ 4579 drive->d_qcount = nvme->n_ioq_count; 4580 4581 /* 4582 * I/O activity to individual namespaces is distributed across 4583 * each of the d_qcount blkdev queues (which has been set to 4584 * the number of nvme submission queues). d_qsize is the number 4585 * of submitted and not completed I/Os within each queue that blkdev 4586 * will allow before it starts holding them in the waitq. 4587 * 4588 * Each namespace will create a child blkdev instance; for each one 4589 * we try to set the d_qsize so that each namespace gets an 4590 * equal portion of the submission queue. 4591 * 4592 * If, post instantiation of the nvme drive, n_namespaces_attachable 4593 * changes and a namespace is attached, it could calculate a 4594 * different d_qsize. It may even be that the sum of the d_qsizes is 4595 * now beyond the submission queue size. Should that be the case 4596 * and the I/O rate is such that blkdev attempts to submit more 4597 * I/Os than the size of the submission queue, the excess I/Os 4598 * will be held behind the semaphore nq_sema. 4599 */ 4600 drive->d_qsize = nvme->n_io_squeue_len / ns_count; 4601 4602 /* 4603 * Don't let the queue size drop below the minimum, though. 4604 */ 4605 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN); 4606 4607 /* 4608 * d_maxxfer is not set, which means the value is taken from the DMA 4609 * attributes specified to bd_alloc_handle. 4610 */ 4611 4612 drive->d_removable = B_FALSE; 4613 drive->d_hotpluggable = B_FALSE; 4614 4615 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 4616 drive->d_target = ns->ns_id; 4617 drive->d_lun = 0; 4618 4619 drive->d_model = nvme->n_idctl->id_model; 4620 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 4621 drive->d_vendor = nvme->n_vendor; 4622 drive->d_vendor_len = strlen(nvme->n_vendor); 4623 drive->d_product = nvme->n_product; 4624 drive->d_product_len = strlen(nvme->n_product); 4625 drive->d_serial = nvme->n_idctl->id_serial; 4626 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 4627 drive->d_revision = nvme->n_idctl->id_fwrev; 4628 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 4629 4630 /* 4631 * If we support the dataset management command, the only restriction 4632 * on a discard request is the maximum number of ranges (segments) 4633 * per single request. 4634 */ 4635 if (nvme->n_idctl->id_oncs.on_dset_mgmt) 4636 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES; 4637 4638 if (mutex_exit_needed) 4639 mutex_exit(&nvme->n_mgmt_mutex); 4640 } 4641 4642 static int 4643 nvme_bd_mediainfo(void *arg, bd_media_t *media) 4644 { 4645 nvme_namespace_t *ns = arg; 4646 nvme_t *nvme = ns->ns_nvme; 4647 boolean_t mutex_exit_needed = B_TRUE; 4648 4649 if (nvme->n_dead) { 4650 return (EIO); 4651 } 4652 4653 /* 4654 * nvme_bd_mediainfo is called by blkdev in various situations, 4655 * most of them out of our control. There's one exception though: 4656 * when we call bd_state_change() in response to a "namespace change" 4657 * notification, in which case the mutex is already held by us.
4658 */ 4659 if (mutex_owned(&nvme->n_mgmt_mutex)) 4660 mutex_exit_needed = B_FALSE; 4661 else 4662 mutex_enter(&nvme->n_mgmt_mutex); 4663 4664 media->m_nblks = ns->ns_block_count; 4665 media->m_blksize = ns->ns_block_size; 4666 media->m_readonly = B_FALSE; 4667 media->m_solidstate = B_TRUE; 4668 4669 media->m_pblksize = ns->ns_best_block_size; 4670 4671 if (mutex_exit_needed) 4672 mutex_exit(&nvme->n_mgmt_mutex); 4673 4674 return (0); 4675 } 4676 4677 static int 4678 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 4679 { 4680 nvme_t *nvme = ns->ns_nvme; 4681 nvme_cmd_t *cmd; 4682 nvme_qpair_t *ioq; 4683 boolean_t poll; 4684 int ret; 4685 4686 if (nvme->n_dead) { 4687 return (EIO); 4688 } 4689 4690 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 4691 if (cmd == NULL) 4692 return (ENOMEM); 4693 4694 cmd->nc_sqid = xfer->x_qnum + 1; 4695 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 4696 ioq = nvme->n_ioq[cmd->nc_sqid]; 4697 4698 /* 4699 * Get the polling flag before submitting the command. The command may 4700 * complete immediately after it was submitted, which means we must 4701 * treat both cmd and xfer as if they have been freed already. 4702 */ 4703 poll = (xfer->x_flags & BD_XFER_POLL) != 0; 4704 4705 ret = nvme_submit_io_cmd(ioq, cmd); 4706 4707 if (ret != 0) 4708 return (ret); 4709 4710 if (!poll) 4711 return (0); 4712 4713 do { 4714 cmd = nvme_retrieve_cmd(nvme, ioq); 4715 if (cmd != NULL) 4716 cmd->nc_callback(cmd); 4717 else 4718 drv_usecwait(10); 4719 } while (ioq->nq_active_cmds != 0); 4720 4721 return (0); 4722 } 4723 4724 static int 4725 nvme_bd_read(void *arg, bd_xfer_t *xfer) 4726 { 4727 nvme_namespace_t *ns = arg; 4728 4729 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 4730 } 4731 4732 static int 4733 nvme_bd_write(void *arg, bd_xfer_t *xfer) 4734 { 4735 nvme_namespace_t *ns = arg; 4736 4737 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 4738 } 4739 4740 static int 4741 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 4742 { 4743 nvme_namespace_t *ns = arg; 4744 4745 if (ns->ns_nvme->n_dead) 4746 return (EIO); 4747 4748 /* 4749 * If the volatile write cache is not present or not enabled the FLUSH 4750 * command is a no-op, so we can take a shortcut here. 
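 *
 * Specifically: with no volatile write cache present the transfer is
 * completed with ENOTSUP, and with a cache that is present but
 * disabled it is completed successfully without issuing a command,
 * as there is nothing volatile to flush in either case.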
4751 */ 4752 if (!ns->ns_nvme->n_write_cache_present) { 4753 bd_xfer_done(xfer, ENOTSUP); 4754 return (0); 4755 } 4756 4757 if (!ns->ns_nvme->n_write_cache_enabled) { 4758 bd_xfer_done(xfer, 0); 4759 return (0); 4760 } 4761 4762 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 4763 } 4764 4765 static int 4766 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 4767 { 4768 nvme_namespace_t *ns = arg; 4769 nvme_t *nvme = ns->ns_nvme; 4770 4771 if (nvme->n_dead) { 4772 return (EIO); 4773 } 4774 4775 if (*(uint64_t *)ns->ns_nguid != 0 || 4776 *(uint64_t *)(ns->ns_nguid + 8) != 0) { 4777 return (ddi_devid_init(devinfo, DEVID_NVME_NGUID, 4778 sizeof (ns->ns_nguid), ns->ns_nguid, devid)); 4779 } else if (*(uint64_t *)ns->ns_eui64 != 0) { 4780 return (ddi_devid_init(devinfo, DEVID_NVME_EUI64, 4781 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 4782 } else { 4783 return (ddi_devid_init(devinfo, DEVID_NVME_NSID, 4784 strlen(ns->ns_devid), ns->ns_devid, devid)); 4785 } 4786 } 4787 4788 static int 4789 nvme_bd_free_space(void *arg, bd_xfer_t *xfer) 4790 { 4791 nvme_namespace_t *ns = arg; 4792 4793 if (xfer->x_dfl == NULL) 4794 return (EINVAL); 4795 4796 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt) 4797 return (ENOTSUP); 4798 4799 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT)); 4800 } 4801 4802 static int 4803 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 4804 { 4805 #ifndef __lock_lint 4806 _NOTE(ARGUNUSED(cred_p)); 4807 #endif 4808 minor_t minor = getminor(*devp); 4809 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4810 int nsid = NVME_MINOR_NSID(minor); 4811 nvme_minor_state_t *nm; 4812 int rv = 0; 4813 4814 if (otyp != OTYP_CHR) 4815 return (EINVAL); 4816 4817 if (nvme == NULL) 4818 return (ENXIO); 4819 4820 if (nsid > nvme->n_namespace_count) 4821 return (ENXIO); 4822 4823 if (nvme->n_dead) 4824 return (EIO); 4825 4826 mutex_enter(&nvme->n_minor_mutex); 4827 4828 /* 4829 * First check the devctl node and error out if it's been opened 4830 * exclusively already by any other thread. 4831 */ 4832 if (nvme->n_minor.nm_oexcl != NULL && 4833 nvme->n_minor.nm_oexcl != curthread) { 4834 rv = EBUSY; 4835 goto out; 4836 } 4837 4838 nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 4839 4840 if (flag & FEXCL) { 4841 if (nm->nm_oexcl != NULL || nm->nm_open) { 4842 rv = EBUSY; 4843 goto out; 4844 } 4845 4846 /* 4847 * If at least one namespace is already open, fail the 4848 * exclusive open of the devctl node. 4849 */ 4850 if (nsid == 0) { 4851 for (int i = 1; i <= nvme->n_namespace_count; i++) { 4852 if (NVME_NSID2NS(nvme, i)->ns_minor.nm_open) { 4853 rv = EBUSY; 4854 goto out; 4855 } 4856 } 4857 } 4858 4859 nm->nm_oexcl = curthread; 4860 } 4861 4862 nm->nm_open = B_TRUE; 4863 4864 out: 4865 mutex_exit(&nvme->n_minor_mutex); 4866 return (rv); 4867 4868 } 4869 4870 static int 4871 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 4872 { 4873 #ifndef __lock_lint 4874 _NOTE(ARGUNUSED(cred_p)); 4875 _NOTE(ARGUNUSED(flag)); 4876 #endif 4877 minor_t minor = getminor(dev); 4878 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 4879 int nsid = NVME_MINOR_NSID(minor); 4880 nvme_minor_state_t *nm; 4881 4882 if (otyp != OTYP_CHR) 4883 return (ENXIO); 4884 4885 if (nvme == NULL) 4886 return (ENXIO); 4887 4888 if (nsid > nvme->n_namespace_count) 4889 return (ENXIO); 4890 4891 nm = nsid == 0 ? 
&nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 4892 4893 mutex_enter(&nvme->n_minor_mutex); 4894 if (nm->nm_oexcl != NULL) { 4895 ASSERT(nm->nm_oexcl == curthread); 4896 nm->nm_oexcl = NULL; 4897 } 4898 4899 ASSERT(nm->nm_open); 4900 nm->nm_open = B_FALSE; 4901 mutex_exit(&nvme->n_minor_mutex); 4902 4903 return (0); 4904 } 4905 4906 static int 4907 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 4908 cred_t *cred_p) 4909 { 4910 _NOTE(ARGUNUSED(cred_p)); 4911 int rv = 0; 4912 void *idctl; 4913 4914 if ((mode & FREAD) == 0) 4915 return (EPERM); 4916 4917 if (nioc->n_len < NVME_IDENTIFY_BUFSIZE) 4918 return (EINVAL); 4919 4920 switch (nioc->n_arg) { 4921 case NVME_IDENTIFY_NSID: 4922 /* 4923 * If we support namespace management, set the nsid to -1 to 4924 * retrieve the common namespace capabilities. Otherwise 4925 * have a best guess by returning identify data for namespace 1. 4926 */ 4927 if (nsid == 0) 4928 nsid = nvme->n_idctl->id_oacs.oa_nsmgmt == 1 ? -1 : 1; 4929 break; 4930 4931 case NVME_IDENTIFY_CTRL: 4932 /* 4933 * Let NVME_IDENTIFY_CTRL work the same on devctl and attachment 4934 * point nodes. 4935 */ 4936 nsid = 0; 4937 break; 4938 4939 case NVME_IDENTIFY_NSID_LIST: 4940 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 4941 return (ENOTSUP); 4942 4943 /* 4944 * For now, always try to get the list of active NSIDs starting 4945 * at the first namespace. This will have to be revisited should 4946 * the need arise to support more than 1024 namespaces. 4947 */ 4948 nsid = 0; 4949 break; 4950 4951 case NVME_IDENTIFY_NSID_DESC: 4952 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 3)) 4953 return (ENOTSUP); 4954 break; 4955 4956 case NVME_IDENTIFY_NSID_ALLOC: 4957 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4958 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4959 return (ENOTSUP); 4960 4961 /* 4962 * To make this work on a devctl node, make this return the 4963 * identify data for namespace 1. We assume that any NVMe 4964 * device supports at least one namespace, which has ID 1. 4965 */ 4966 if (nsid == 0) 4967 nsid = 1; 4968 break; 4969 4970 case NVME_IDENTIFY_NSID_ALLOC_LIST: 4971 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4972 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4973 return (ENOTSUP); 4974 4975 /* 4976 * For now, always try to get the list of allocated NSIDs 4977 * starting at the first namespace. This will have to be 4978 * revisited should the need arise to support more than 1024 4979 * namespaces. 4980 */ 4981 nsid = 0; 4982 break; 4983 4984 case NVME_IDENTIFY_NSID_CTRL_LIST: 4985 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4986 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4987 return (ENOTSUP); 4988 4989 if (nsid == 0) 4990 return (EINVAL); 4991 break; 4992 4993 case NVME_IDENTIFY_CTRL_LIST: 4994 if (!NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2) || 4995 (nvme->n_idctl->id_oacs.oa_nsmgmt == 0)) 4996 return (ENOTSUP); 4997 4998 if (nsid != 0) 4999 return (EINVAL); 5000 break; 5001 5002 default: 5003 return (EINVAL); 5004 } 5005 5006 if ((rv = nvme_identify(nvme, B_TRUE, nsid, nioc->n_arg & 0xff, 5007 (void **)&idctl)) != 0) 5008 return (rv); 5009 5010 if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode) 5011 != 0) 5012 rv = EFAULT; 5013 5014 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 5015 5016 return (rv); 5017 } 5018 5019 /* 5020 * Execute commands on behalf of the various ioctls. 
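 *
 * The caller supplies the submission queue entry. Admin commands are
 * sent to the admin queue; everything else is spread across the I/O
 * queues by CPU id. If rwk contains FREAD and/or FWRITE, a DMA buffer
 * of data_len bytes is allocated and the data is copied in from
 * and/or out to data_addr around command execution.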
5021 */ 5022 static int 5023 nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr, 5024 uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout) 5025 { 5026 nvme_cmd_t *cmd; 5027 nvme_qpair_t *ioq; 5028 int rv = 0; 5029 5030 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 5031 if (is_admin) { 5032 cmd->nc_sqid = 0; 5033 ioq = nvme->n_adminq; 5034 } else { 5035 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; 5036 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 5037 ioq = nvme->n_ioq[cmd->nc_sqid]; 5038 } 5039 5040 /* 5041 * This function is used to facilitate requests from 5042 * userspace, so don't panic if the command fails. This 5043 * is especially true for admin passthru commands, where 5044 * the actual command data structure is entirely defined 5045 * by userspace. 5046 */ 5047 cmd->nc_dontpanic = B_TRUE; 5048 5049 cmd->nc_callback = nvme_wakeup_cmd; 5050 cmd->nc_sqe = *sqe; 5051 5052 if ((rwk & (FREAD | FWRITE)) != 0) { 5053 if (data_addr == NULL) { 5054 rv = EINVAL; 5055 goto free_cmd; 5056 } 5057 5058 if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ, 5059 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 5060 dev_err(nvme->n_dip, CE_WARN, 5061 "!nvme_zalloc_dma failed for nvme_ioc_cmd()"); 5062 5063 rv = ENOMEM; 5064 goto free_cmd; 5065 } 5066 5067 if ((rv = nvme_fill_prp(cmd, cmd->nc_dma->nd_dmah)) != 0) 5068 goto free_cmd; 5069 5070 if ((rwk & FWRITE) != 0) { 5071 if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp, 5072 data_len, rwk & FKIOCTL) != 0) { 5073 rv = EFAULT; 5074 goto free_cmd; 5075 } 5076 } 5077 } 5078 5079 if (is_admin) { 5080 nvme_admin_cmd(cmd, timeout); 5081 } else { 5082 mutex_enter(&cmd->nc_mutex); 5083 5084 rv = nvme_submit_io_cmd(ioq, cmd); 5085 5086 if (rv == EAGAIN) { 5087 mutex_exit(&cmd->nc_mutex); 5088 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 5089 "!nvme_ioc_cmd() failed, I/O Q full"); 5090 goto free_cmd; 5091 } 5092 5093 nvme_wait_cmd(cmd, timeout); 5094 5095 mutex_exit(&cmd->nc_mutex); 5096 } 5097 5098 if (cqe != NULL) 5099 *cqe = cmd->nc_cqe; 5100 5101 if ((rv = nvme_check_cmd_status(cmd)) != 0) { 5102 dev_err(nvme->n_dip, CE_WARN, 5103 "!nvme_ioc_cmd() failed with sct = %x, sc = %x", 5104 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 5105 5106 goto free_cmd; 5107 } 5108 5109 if ((rwk & FREAD) != 0) { 5110 if (ddi_copyout(cmd->nc_dma->nd_memp, 5111 data_addr, data_len, rwk & FKIOCTL) != 0) 5112 rv = EFAULT; 5113 } 5114 5115 free_cmd: 5116 nvme_free_cmd(cmd); 5117 5118 return (rv); 5119 } 5120 5121 static int 5122 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5123 int mode, cred_t *cred_p) 5124 { 5125 _NOTE(ARGUNUSED(nsid, cred_p)); 5126 int rv = 0; 5127 nvme_reg_cap_t cap = { 0 }; 5128 nvme_capabilities_t nc; 5129 5130 if ((mode & FREAD) == 0) 5131 return (EPERM); 5132 5133 if (nioc->n_len < sizeof (nc)) 5134 return (EINVAL); 5135 5136 cap.r = nvme_get64(nvme, NVME_REG_CAP); 5137 5138 /* 5139 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 5140 * specify the base page size of 4k (1<<12), so add 12 here to 5141 * get the real page size value. 
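 * For example, a controller reporting MPSMIN = 0 and MPSMAX = 4
 * supports page sizes from 1 << 12 (4096 bytes) up to 1 << 16
 * (65536 bytes).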
5142 */ 5143 nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax); 5144 nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin); 5145 5146 if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0) 5147 rv = EFAULT; 5148 5149 return (rv); 5150 } 5151 5152 static int 5153 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5154 int mode, cred_t *cred_p) 5155 { 5156 _NOTE(ARGUNUSED(cred_p)); 5157 void *log = NULL; 5158 size_t bufsize = 0; 5159 int rv = 0; 5160 5161 if ((mode & FREAD) == 0) 5162 return (EPERM); 5163 5164 if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active) 5165 return (EINVAL); 5166 5167 switch (nioc->n_arg) { 5168 case NVME_LOGPAGE_ERROR: 5169 if (nsid != 0) 5170 return (EINVAL); 5171 break; 5172 case NVME_LOGPAGE_HEALTH: 5173 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0) 5174 return (EINVAL); 5175 5176 if (nsid == 0) 5177 nsid = (uint32_t)-1; 5178 5179 break; 5180 case NVME_LOGPAGE_FWSLOT: 5181 if (nsid != 0) 5182 return (EINVAL); 5183 break; 5184 default: 5185 if (!NVME_IS_VENDOR_SPECIFIC_LOGPAGE(nioc->n_arg)) 5186 return (EINVAL); 5187 if (nioc->n_len > NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE) { 5188 dev_err(nvme->n_dip, CE_NOTE, "!Vendor-specific log " 5189 "page size exceeds device maximum supported size: " 5190 "%lu", NVME_VENDOR_SPECIFIC_LOGPAGE_MAX_SIZE); 5191 return (EINVAL); 5192 } 5193 if (nioc->n_len == 0) 5194 return (EINVAL); 5195 bufsize = nioc->n_len; 5196 if (nsid == 0) 5197 nsid = (uint32_t)-1; 5198 } 5199 5200 if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid) 5201 != DDI_SUCCESS) 5202 return (EIO); 5203 5204 if (nioc->n_len < bufsize) { 5205 kmem_free(log, bufsize); 5206 return (EINVAL); 5207 } 5208 5209 if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0) 5210 rv = EFAULT; 5211 5212 nioc->n_len = bufsize; 5213 kmem_free(log, bufsize); 5214 5215 return (rv); 5216 } 5217 5218 static int 5219 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5220 int mode, cred_t *cred_p) 5221 { 5222 _NOTE(ARGUNUSED(cred_p)); 5223 void *buf = NULL; 5224 size_t bufsize = 0; 5225 uint32_t res = 0; 5226 uint8_t feature; 5227 int rv = 0; 5228 5229 if ((mode & FREAD) == 0) 5230 return (EPERM); 5231 5232 if (nsid > 0 && !NVME_NSID2NS(nvme, nsid)->ns_active) 5233 return (EINVAL); 5234 5235 if ((nioc->n_arg >> 32) > 0xff) 5236 return (EINVAL); 5237 5238 feature = (uint8_t)(nioc->n_arg >> 32); 5239 5240 switch (feature) { 5241 case NVME_FEAT_ARBITRATION: 5242 case NVME_FEAT_POWER_MGMT: 5243 case NVME_FEAT_ERROR: 5244 case NVME_FEAT_NQUEUES: 5245 case NVME_FEAT_INTR_COAL: 5246 case NVME_FEAT_WRITE_ATOM: 5247 case NVME_FEAT_ASYNC_EVENT: 5248 case NVME_FEAT_PROGRESS: 5249 if (nsid != 0) 5250 return (EINVAL); 5251 break; 5252 5253 case NVME_FEAT_TEMPERATURE: 5254 if (nsid != 0) 5255 return (EINVAL); 5256 res = nioc->n_arg & 0xffffffffUL; 5257 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) { 5258 nvme_temp_threshold_t tt; 5259 5260 tt.r = res; 5261 if (tt.b.tt_thsel != NVME_TEMP_THRESH_OVER && 5262 tt.b.tt_thsel != NVME_TEMP_THRESH_UNDER) { 5263 return (EINVAL); 5264 } 5265 5266 if (tt.b.tt_tmpsel > NVME_TEMP_THRESH_MAX_SENSOR) { 5267 return (EINVAL); 5268 } 5269 } else if (res != 0) { 5270 return (ENOTSUP); 5271 } 5272 break; 5273 5274 case NVME_FEAT_INTR_VECT: 5275 if (nsid != 0) 5276 return (EINVAL); 5277 5278 res = nioc->n_arg & 0xffffffffUL; 5279 if (res >= nvme->n_intr_cnt) 5280 return (EINVAL); 5281 break; 5282 5283 case NVME_FEAT_LBA_RANGE: 5284 if (nvme->n_lba_range_supported == B_FALSE) 5285 return (EINVAL); 5286 5287 if 
(nsid == 0 || 5288 nsid > nvme->n_namespace_count) 5289 return (EINVAL); 5290 5291 break; 5292 5293 case NVME_FEAT_WRITE_CACHE: 5294 if (nsid != 0) 5295 return (EINVAL); 5296 5297 if (!nvme->n_write_cache_present) 5298 return (EINVAL); 5299 5300 break; 5301 5302 case NVME_FEAT_AUTO_PST: 5303 if (nsid != 0) 5304 return (EINVAL); 5305 5306 if (!nvme->n_auto_pst_supported) 5307 return (EINVAL); 5308 5309 break; 5310 5311 default: 5312 return (EINVAL); 5313 } 5314 5315 rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf, 5316 &bufsize); 5317 if (rv != 0) 5318 return (rv); 5319 5320 if (nioc->n_len < bufsize) { 5321 kmem_free(buf, bufsize); 5322 return (EINVAL); 5323 } 5324 5325 if (buf && ddi_copyout(buf, (void*)nioc->n_buf, bufsize, mode) != 0) 5326 rv = EFAULT; 5327 5328 kmem_free(buf, bufsize); 5329 nioc->n_arg = res; 5330 nioc->n_len = bufsize; 5331 5332 return (rv); 5333 } 5334 5335 static int 5336 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5337 cred_t *cred_p) 5338 { 5339 _NOTE(ARGUNUSED(nsid, mode, cred_p)); 5340 5341 if ((mode & FREAD) == 0) 5342 return (EPERM); 5343 5344 nioc->n_arg = nvme->n_intr_cnt; 5345 return (0); 5346 } 5347 5348 static int 5349 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5350 cred_t *cred_p) 5351 { 5352 _NOTE(ARGUNUSED(nsid, cred_p)); 5353 int rv = 0; 5354 5355 if ((mode & FREAD) == 0) 5356 return (EPERM); 5357 5358 if (nioc->n_len < sizeof (nvme->n_version)) 5359 return (ENOMEM); 5360 5361 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf, 5362 sizeof (nvme->n_version), mode) != 0) 5363 rv = EFAULT; 5364 5365 return (rv); 5366 } 5367 5368 static int 5369 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5370 cred_t *cred_p) 5371 { 5372 _NOTE(ARGUNUSED(mode)); 5373 nvme_format_nvm_t frmt = { 0 }; 5374 int c_nsid = nsid != 0 ? nsid : 1; 5375 nvme_identify_nsid_t *idns; 5376 nvme_minor_state_t *nm; 5377 5378 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5379 return (EPERM); 5380 5381 nm = nsid == 0 ? &nvme->n_minor : &(NVME_NSID2NS(nvme, nsid)->ns_minor); 5382 if (nm->nm_oexcl != curthread) 5383 return (EACCES); 5384 5385 if (nsid != 0) { 5386 if (NVME_NSID2NS(nvme, nsid)->ns_attached) 5387 return (EBUSY); 5388 else if (!NVME_NSID2NS(nvme, nsid)->ns_active) 5389 return (EINVAL); 5390 } 5391 5392 frmt.r = nioc->n_arg & 0xffffffff; 5393 5394 /* 5395 * Check whether the FORMAT NVM command is supported. 5396 */ 5397 if (nvme->n_idctl->id_oacs.oa_format == 0) 5398 return (ENOTSUP); 5399 5400 /* 5401 * Don't allow format or secure erase of individual namespace if that 5402 * would cause a format or secure erase of all namespaces. 5403 */ 5404 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0) 5405 return (EINVAL); 5406 5407 if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE && 5408 nvme->n_idctl->id_fna.fn_sec_erase != 0) 5409 return (EINVAL); 5410 5411 /* 5412 * Don't allow formatting with Protection Information. 5413 */ 5414 if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0) 5415 return (EINVAL); 5416 5417 /* 5418 * Don't allow formatting using an illegal LBA format, or any LBA format 5419 * that uses metadata. 5420 */ 5421 idns = NVME_NSID2NS(nvme, c_nsid)->ns_idns; 5422 if (frmt.b.fm_lbaf > idns->id_nlbaf || 5423 idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0) 5424 return (EINVAL); 5425 5426 /* 5427 * Don't allow formatting using an illegal Secure Erase setting. 
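 * In particular, a cryptographic erase is only permitted if the
 * controller advertises support for it via the FNA field of the
 * identify controller data (fn_crypt_erase below).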
5428 */ 5429 if (frmt.b.fm_ses > NVME_FRMT_MAX_SES || 5430 (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO && 5431 nvme->n_idctl->id_fna.fn_crypt_erase == 0)) 5432 return (EINVAL); 5433 5434 if (nsid == 0) 5435 nsid = (uint32_t)-1; 5436 5437 return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0, 5438 B_FALSE, frmt.b.fm_ses)); 5439 } 5440 5441 static int 5442 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5443 cred_t *cred_p) 5444 { 5445 _NOTE(ARGUNUSED(nioc, mode)); 5446 int rv; 5447 5448 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5449 return (EPERM); 5450 5451 if (nsid == 0) 5452 return (EINVAL); 5453 5454 if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread) 5455 return (EACCES); 5456 5457 mutex_enter(&nvme->n_mgmt_mutex); 5458 5459 rv = nvme_detach_ns(nvme, nsid); 5460 5461 mutex_exit(&nvme->n_mgmt_mutex); 5462 5463 return (rv); 5464 } 5465 5466 static int 5467 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5468 cred_t *cred_p) 5469 { 5470 _NOTE(ARGUNUSED(nioc, mode)); 5471 int rv; 5472 5473 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5474 return (EPERM); 5475 5476 if (nsid == 0) 5477 return (EINVAL); 5478 5479 if (NVME_NSID2NS(nvme, nsid)->ns_minor.nm_oexcl != curthread) 5480 return (EACCES); 5481 5482 mutex_enter(&nvme->n_mgmt_mutex); 5483 5484 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) { 5485 mutex_exit(&nvme->n_mgmt_mutex); 5486 return (EIO); 5487 } 5488 5489 rv = nvme_attach_ns(nvme, nsid); 5490 5491 mutex_exit(&nvme->n_mgmt_mutex); 5492 return (rv); 5493 } 5494 5495 static void 5496 nvme_ufm_update(nvme_t *nvme) 5497 { 5498 mutex_enter(&nvme->n_fwslot_mutex); 5499 ddi_ufm_update(nvme->n_ufmh); 5500 if (nvme->n_fwslot != NULL) { 5501 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t)); 5502 nvme->n_fwslot = NULL; 5503 } 5504 mutex_exit(&nvme->n_fwslot_mutex); 5505 } 5506 5507 /* 5508 * Download new firmware to the device's internal staging area. We do not call 5509 * nvme_ufm_update() here because after a firmware download, there has been no 5510 * change to any of the actual persistent firmware data. That requires a 5511 * subsequent ioctl (NVME_IOC_FIRMWARE_COMMIT) to commit the firmware to a slot 5512 * or to activate a slot. 5513 */ 5514 static int 5515 nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5516 int mode, cred_t *cred_p) 5517 { 5518 int rv = 0; 5519 size_t len, copylen; 5520 offset_t offset; 5521 uintptr_t buf; 5522 nvme_cqe_t cqe = { 0 }; 5523 nvme_sqe_t sqe = { 5524 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD 5525 }; 5526 5527 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5528 return (EPERM); 5529 5530 if (nvme->n_idctl->id_oacs.oa_firmware == 0) 5531 return (ENOTSUP); 5532 5533 if (nsid != 0) 5534 return (EINVAL); 5535 5536 /* 5537 * The offset (in n_len) is restricted to the number of DWORDs in 5538 * 32 bits. 5539 */ 5540 if (nioc->n_len > NVME_FW_OFFSETB_MAX) 5541 return (EINVAL); 5542 5543 /* Confirm that both offset and length are a multiple of DWORD bytes */ 5544 if ((nioc->n_len & NVME_DWORD_MASK) != 0 || 5545 (nioc->n_arg & NVME_DWORD_MASK) != 0) 5546 return (EINVAL); 5547 5548 len = nioc->n_len; 5549 offset = nioc->n_arg; 5550 buf = (uintptr_t)nioc->n_buf; 5551 5552 nioc->n_arg = 0; 5553 5554 while (len > 0 && rv == 0) { 5555 /* 5556 * nvme_ioc_cmd() does not use SGLs or PRP lists. 5557 * It is limited to 2 PRPs per NVM command, so limit 5558 * the size of the data to 2 pages. 
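 * With the common 4k controller page size this means the image is
 * transferred in chunks of at most 8k per Firmware Image Download
 * command.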
5559 */ 5560 copylen = MIN(2 * nvme->n_pagesize, len); 5561 5562 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1; 5563 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT); 5564 5565 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen, 5566 FWRITE, &cqe, nvme_admin_cmd_timeout); 5567 5568 /* 5569 * Regardless of whether the command succeeded or not, whether 5570 * there's an errno in rv to be returned, we'll return any 5571 * command-specific status code in n_arg. 5572 * 5573 * As n_arg isn't cleared in all other possible code paths 5574 * returning an error, we return the status code as a negative 5575 * value so it can be distinguished easily from whatever value 5576 * was passed in n_arg originally. This of course only works as 5577 * long as arguments passed in n_arg are less than INT64_MAX, 5578 * which they currently are. 5579 */ 5580 if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 5581 nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc; 5582 5583 buf += copylen; 5584 offset += copylen; 5585 len -= copylen; 5586 } 5587 5588 return (rv); 5589 } 5590 5591 static int 5592 nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 5593 int mode, cred_t *cred_p) 5594 { 5595 nvme_firmware_commit_dw10_t fc_dw10 = { 0 }; 5596 uint32_t slot = nioc->n_arg & 0xffffffff; 5597 uint32_t action = nioc->n_arg >> 32; 5598 nvme_cqe_t cqe = { 0 }; 5599 nvme_sqe_t sqe = { 5600 .sqe_opc = NVME_OPC_FW_ACTIVATE 5601 }; 5602 int timeout; 5603 int rv; 5604 5605 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 5606 return (EPERM); 5607 5608 if (nvme->n_idctl->id_oacs.oa_firmware == 0) 5609 return (ENOTSUP); 5610 5611 if (nsid != 0) 5612 return (EINVAL); 5613 5614 /* Validate slot is in range. */ 5615 if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX) 5616 return (EINVAL); 5617 5618 switch (action) { 5619 case NVME_FWC_SAVE: 5620 case NVME_FWC_SAVE_ACTIVATE: 5621 if (slot == 1 && nvme->n_idctl->id_frmw.fw_readonly) 5622 return (EROFS); 5623 break; 5624 case NVME_FWC_ACTIVATE: 5625 case NVME_FWC_ACTIVATE_IMMED: 5626 break; 5627 default: 5628 return (EINVAL); 5629 } 5630 5631 /* 5632 * Use the extended timeout for all firmware operations here as we've 5633 * seen examples in the field where an activate may take longer than the 5634 * 1s period that we've asked for. 5635 */ 5636 timeout = nvme_commit_save_cmd_timeout; 5637 5638 fc_dw10.b.fc_slot = slot; 5639 fc_dw10.b.fc_action = action; 5640 sqe.sqe_cdw10 = fc_dw10.r; 5641 5642 nioc->n_arg = 0; 5643 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout); 5644 5645 /* 5646 * Regardless of whether the command succeeded or not, whether 5647 * there's an errno in rv to be returned, we'll return any 5648 * command-specific status code in n_arg. 5649 * 5650 * As n_arg isn't cleared in all other possible code paths 5651 * returning an error, we return the status code as a negative 5652 * value so it can be distinguished easily from whatever value 5653 * was passed in n_arg originally. This of course only works as 5654 * long as arguments passed in n_arg are less than INT64_MAX, 5655 * which they currently are. 5656 */ 5657 if (cqe.cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 5658 nioc->n_arg = (uint64_t)-cqe.cqe_sf.sf_sc; 5659 5660 /* 5661 * Let the DDI UFM subsystem know that the firmware information for 5662 * this device has changed. We perform this unconditionally as an 5663 * invalidation doesn't particularly hurt us. 
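 * nvme_ufm_update() simply drops the cached firmware slot log and
 * notifies the DDI UFM framework, so the slot information can be
 * fetched fresh the next time it is needed.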
5664 */ 5665 nvme_ufm_update(nvme); 5666 5667 return (rv); 5668 } 5669 5670 /* 5671 * Helper to copy in a passthru command from userspace, handling 5672 * different data models. 5673 */ 5674 static int 5675 nvme_passthru_copy_cmd_in(const void *buf, nvme_passthru_cmd_t *cmd, int mode) 5676 { 5677 #ifdef _MULTI_DATAMODEL 5678 switch (ddi_model_convert_from(mode & FMODELS)) { 5679 case DDI_MODEL_ILP32: { 5680 nvme_passthru_cmd32_t cmd32; 5681 if (ddi_copyin(buf, (void*)&cmd32, sizeof (cmd32), mode) != 0) 5682 return (-1); 5683 cmd->npc_opcode = cmd32.npc_opcode; 5684 cmd->npc_timeout = cmd32.npc_timeout; 5685 cmd->npc_flags = cmd32.npc_flags; 5686 cmd->npc_cdw12 = cmd32.npc_cdw12; 5687 cmd->npc_cdw13 = cmd32.npc_cdw13; 5688 cmd->npc_cdw14 = cmd32.npc_cdw14; 5689 cmd->npc_cdw15 = cmd32.npc_cdw15; 5690 cmd->npc_buflen = cmd32.npc_buflen; 5691 cmd->npc_buf = cmd32.npc_buf; 5692 break; 5693 } 5694 case DDI_MODEL_NONE: 5695 #endif 5696 if (ddi_copyin(buf, (void*)cmd, sizeof (nvme_passthru_cmd_t), 5697 mode) != 0) 5698 return (-1); 5699 #ifdef _MULTI_DATAMODEL 5700 break; 5701 } 5702 #endif 5703 return (0); 5704 } 5705 5706 /* 5707 * Helper to copy out a passthru command result to userspace, handling 5708 * different data models. 5709 */ 5710 static int 5711 nvme_passthru_copy_cmd_out(const nvme_passthru_cmd_t *cmd, void *buf, int mode) 5712 { 5713 #ifdef _MULTI_DATAMODEL 5714 switch (ddi_model_convert_from(mode & FMODELS)) { 5715 case DDI_MODEL_ILP32: { 5716 nvme_passthru_cmd32_t cmd32; 5717 bzero(&cmd32, sizeof (cmd32)); 5718 cmd32.npc_opcode = cmd->npc_opcode; 5719 cmd32.npc_status = cmd->npc_status; 5720 cmd32.npc_err = cmd->npc_err; 5721 cmd32.npc_timeout = cmd->npc_timeout; 5722 cmd32.npc_flags = cmd->npc_flags; 5723 cmd32.npc_cdw0 = cmd->npc_cdw0; 5724 cmd32.npc_cdw12 = cmd->npc_cdw12; 5725 cmd32.npc_cdw13 = cmd->npc_cdw13; 5726 cmd32.npc_cdw14 = cmd->npc_cdw14; 5727 cmd32.npc_cdw15 = cmd->npc_cdw15; 5728 cmd32.npc_buflen = (size32_t)cmd->npc_buflen; 5729 cmd32.npc_buf = (uintptr32_t)cmd->npc_buf; 5730 if (ddi_copyout(&cmd32, buf, sizeof (cmd32), mode) != 0) 5731 return (-1); 5732 break; 5733 } 5734 case DDI_MODEL_NONE: 5735 #endif 5736 if (ddi_copyout(cmd, buf, sizeof (nvme_passthru_cmd_t), 5737 mode) != 0) 5738 return (-1); 5739 #ifdef _MULTI_DATAMODEL 5740 break; 5741 } 5742 #endif 5743 return (0); 5744 } 5745 5746 /* 5747 * Run an arbitrary vendor-specific admin command on the device. 5748 */ 5749 static int 5750 nvme_ioctl_passthru(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 5751 cred_t *cred_p) 5752 { 5753 int rv = 0; 5754 uint_t timeout = 0; 5755 int rwk = 0; 5756 nvme_passthru_cmd_t cmd; 5757 size_t expected_passthru_size = 0; 5758 nvme_sqe_t sqe; 5759 nvme_cqe_t cqe; 5760 5761 bzero(&cmd, sizeof (cmd)); 5762 bzero(&sqe, sizeof (sqe)); 5763 bzero(&cqe, sizeof (cqe)); 5764 5765 /* 5766 * Basic checks: permissions, data model, argument size. 
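 * The caller must have the node open for writing and hold sufficient
 * privilege (secpolicy_sys_config()), and the ioctl argument must be
 * exactly the size of nvme_passthru_cmd_t (or of
 * nvme_passthru_cmd32_t for ILP32 callers) before anything is copied
 * in.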

/*
 * Run an arbitrary vendor-specific admin command on the device.
 */
static int
nvme_ioctl_passthru(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	int rv = 0;
	uint_t timeout = 0;
	int rwk = 0;
	nvme_passthru_cmd_t cmd;
	size_t expected_passthru_size = 0;
	nvme_sqe_t sqe;
	nvme_cqe_t cqe;

	bzero(&cmd, sizeof (cmd));
	bzero(&sqe, sizeof (sqe));
	bzero(&cqe, sizeof (cqe));

	/*
	 * Basic checks: permissions, data model, argument size.
	 */
	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	/*
	 * Compute the expected size of the argument buffer.
	 */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32:
		expected_passthru_size = sizeof (nvme_passthru_cmd32_t);
		break;
	case DDI_MODEL_NONE:
#endif
		expected_passthru_size = sizeof (nvme_passthru_cmd_t);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	if (nioc->n_len != expected_passthru_size) {
		cmd.npc_err = NVME_PASSTHRU_ERR_CMD_SIZE;
		rv = EINVAL;
		goto out;
	}

	/*
	 * Ensure the device supports the standard vendor-specific admin
	 * command format.
	 */
	if (!nvme->n_idctl->id_nvscc.nv_spec) {
		cmd.npc_err = NVME_PASSTHRU_ERR_NOT_SUPPORTED;
		rv = ENOTSUP;
		goto out;
	}

	if (nvme_passthru_copy_cmd_in((const void*)nioc->n_buf, &cmd, mode))
		return (EFAULT);

	if (!NVME_IS_VENDOR_SPECIFIC_CMD(cmd.npc_opcode)) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_OPCODE;
		rv = EINVAL;
		goto out;
	}

	/*
	 * This restriction is not mandated by the spec, so future work could
	 * relax it if it becomes necessary to support commands that both
	 * read and write.
	 */
	if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0 &&
	    (cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0) {
		cmd.npc_err = NVME_PASSTHRU_ERR_READ_AND_WRITE;
		rv = EINVAL;
		goto out;
	}
	if (cmd.npc_timeout > nvme_vendor_specific_admin_cmd_max_timeout) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_TIMEOUT;
		rv = EINVAL;
		goto out;
	}
	timeout = cmd.npc_timeout;

	/*
	 * Passed-thru command buffer verification:
	 * - Size is a multiple of dwords
	 * - Non-null iff the length is non-zero
	 * - Null if neither reading nor writing data
	 * - Non-null if reading or writing data
	 * - Does not exceed the maximum buffer size
	 */
	if ((cmd.npc_buflen % sizeof (uint32_t)) != 0) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}
	if (((void*)cmd.npc_buf != NULL && cmd.npc_buflen == 0) ||
	    ((void*)cmd.npc_buf == NULL && cmd.npc_buflen != 0)) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}
	if (cmd.npc_flags == 0 && (void*)cmd.npc_buf != NULL) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}
	if ((cmd.npc_flags != 0) && ((void*)cmd.npc_buf == NULL)) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}
	if (cmd.npc_buflen > nvme_vendor_specific_admin_cmd_size) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}
	if ((cmd.npc_buflen >> NVME_DWORD_SHIFT) > UINT32_MAX) {
		cmd.npc_err = NVME_PASSTHRU_ERR_INVALID_BUFFER;
		rv = EINVAL;
		goto out;
	}

	sqe.sqe_opc = cmd.npc_opcode;
	sqe.sqe_nsid = nsid;
	sqe.sqe_cdw10 = (uint32_t)(cmd.npc_buflen >> NVME_DWORD_SHIFT);
	sqe.sqe_cdw12 = cmd.npc_cdw12;
	sqe.sqe_cdw13 = cmd.npc_cdw13;
	sqe.sqe_cdw14 = cmd.npc_cdw14;
	sqe.sqe_cdw15 = cmd.npc_cdw15;
	if ((cmd.npc_flags & NVME_PASSTHRU_READ) != 0)
		rwk = FREAD;
	else if ((cmd.npc_flags & NVME_PASSTHRU_WRITE) != 0)
		rwk = FWRITE;

	rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void*)cmd.npc_buf,
	    cmd.npc_buflen, rwk, &cqe, timeout);
	cmd.npc_status = cqe.cqe_sf.sf_sc;
	cmd.npc_cdw0 = cqe.cqe_dw0;

out:
	if (nvme_passthru_copy_cmd_out(&cmd, (void*)nioc->n_buf, mode))
		rv = EFAULT;
	return (rv);
}
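
/*
 * Usage sketch (illustrative only, not part of the driver; the ioctl
 * plumbing that delivers the structure to nvme_ioctl_passthru() above is
 * assumed rather than shown): a userspace consumer issuing a vendor-specific
 * admin command that reads data back from the controller would fill in the
 * request roughly as follows, where data_buf is the caller's data buffer.
 *
 *	nvme_passthru_cmd_t pc = { 0 };
 *
 *	pc.npc_opcode = 0xc1;			// vendor-specific opcode
 *	pc.npc_timeout = 5;			// seconds, within the cap
 *	pc.npc_flags = NVME_PASSTHRU_READ;	// data flows device -> host
 *	pc.npc_buflen = 4096;			// dword multiple, within limits
 *	pc.npc_buf = (uintptr_t)data_buf;	// non-NULL because we read
 *
 * On return, npc_status carries the NVMe status code and npc_cdw0 the
 * command's completion dword 0, as filled in by the code above.
 */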

static int
nvme_ioctl_ns_info(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	nvme_namespace_t *ns;
	nvme_ns_info_t *info;
	int ret;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < sizeof (nvme_ns_info_t))
		return (EINVAL);

	/*
	 * If we have the controller open (as indicated by nsid set to zero)
	 * then we will allow the caller to specify a namespace id in n_arg.
	 */
	if (nsid == 0) {
		if (nioc->n_arg == 0 || nioc->n_arg > nvme->n_namespace_count)
			return (EINVAL);
		nsid = (int)nioc->n_arg;
	} else if (nioc->n_arg != 0) {
		return (EINVAL);
	}

	ASSERT3S(nsid, >, 0);
	ns = NVME_NSID2NS(nvme, nsid);

	info = kmem_zalloc(sizeof (nvme_ns_info_t), KM_NOSLEEP_LAZY);
	if (info == NULL)
		return (ENOMEM);

	mutex_enter(&nvme->n_mgmt_mutex);

	if (ns->ns_allocated)
		info->nni_state |= NVME_NS_STATE_ALLOCATED;

	if (ns->ns_active)
		info->nni_state |= NVME_NS_STATE_ACTIVE;

	if (ns->ns_ignore)
		info->nni_state |= NVME_NS_STATE_IGNORED;

	if (ns->ns_attached) {
		const char *addr;

		info->nni_state |= NVME_NS_STATE_ATTACHED;
		addr = bd_address(ns->ns_bd_hdl);
		if (strlcpy(info->nni_addr, addr, sizeof (info->nni_addr)) >=
		    sizeof (info->nni_addr)) {
			mutex_exit(&nvme->n_mgmt_mutex);
			ret = EOVERFLOW;
			goto done;
		}
	}

	bcopy(ns->ns_idns, &info->nni_id, sizeof (nvme_identify_nsid_t));
	mutex_exit(&nvme->n_mgmt_mutex);

	if (ddi_copyout(info, (void *)nioc->n_buf, sizeof (nvme_ns_info_t),
	    mode & FKIOCTL) != 0) {
		ret = EFAULT;
	} else {
		ret = 0;
	}

done:
	kmem_free(info, sizeof (nvme_ns_info_t));
	return (ret);
}
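
/*
 * ioctl(9E) entry point for both the controller and the namespace minor
 * nodes. The minor number encodes the instance and the namespace id; the
 * nvme_ioctl_t argument is copied in and out with awareness of 32-bit
 * callers, and the request is then dispatched through a function table
 * indexed by the ioctl command number. devctl ioctls are handed off to
 * ndi_devctl_ioctl(), and a dead controller only accepts a detach request.
 */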
static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(rval_p));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	int rv = 0;
	nvme_ioctl_t nioc;

	int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
		NULL,
		nvme_ioctl_identify,
		NULL,
		nvme_ioctl_capabilities,
		nvme_ioctl_get_logpage,
		nvme_ioctl_get_features,
		nvme_ioctl_intr_cnt,
		nvme_ioctl_version,
		nvme_ioctl_format,
		nvme_ioctl_detach,
		nvme_ioctl_attach,
		nvme_ioctl_firmware_download,
		nvme_ioctl_firmware_commit,
		nvme_ioctl_passthru,
		nvme_ioctl_ns_info
	};

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (IS_DEVCTL(cmd))
		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;
		if (ddi_copyin((void*)arg, &nioc32, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		nioc.n_len = nioc32.n_len;
		nioc.n_buf = nioc32.n_buf;
		nioc.n_arg = nioc32.n_arg;
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyin((void*)arg, &nioc, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	if (nvme->n_dead && cmd != NVME_IOC_DETACH)
		return (EIO);

	if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
		rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
		    cred_p);
	else
		rv = EINVAL;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;

		nioc32.n_len = (size32_t)nioc.n_len;
		nioc32.n_buf = (uintptr32_t)nioc.n_buf;
		nioc32.n_arg = nioc.n_arg;

		if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	return (rv);
}

/*
 * DDI UFM Callbacks
 */
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	nvme_t *nvme = arg;

	if (imgno != 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);

	return (0);
}

/*
 * Fill out firmware slot information for the requested slot. The firmware
 * slot information is gathered by requesting the Firmware Slot Information
 * log page. The format of the page is described in section 5.10.1.3 of the
 * NVMe specification.
 *
 * We lazily cache the log page on the first call and then invalidate the
 * cached data after a successful firmware download or firmware commit
 * command. The cached data is protected by a mutex as the state can change
 * asynchronously to this callback.
 */
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	nvme_t *nvme = arg;
	void *log = NULL;
	size_t bufsize;
	ddi_ufm_attr_t attr = 0;
	char fw_ver[NVME_FWVER_SZ + 1];
	int ret;

	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
		return (EINVAL);

	mutex_enter(&nvme->n_fwslot_mutex);
	if (nvme->n_fwslot == NULL) {
		ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
		    NVME_LOGPAGE_FWSLOT, 0);
		if (ret != DDI_SUCCESS ||
		    bufsize != sizeof (nvme_fwslot_log_t)) {
			if (log != NULL)
				kmem_free(log, bufsize);
			mutex_exit(&nvme->n_fwslot_mutex);
			return (EIO);
		}
		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
	}

	/*
	 * NVMe numbers firmware slots starting at 1.
	 */
	if (slotno == (nvme->n_fwslot->fw_afi - 1))
		attr |= DDI_UFM_ATTR_ACTIVE;

	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
		attr |= DDI_UFM_ATTR_WRITEABLE;

	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
		attr |= DDI_UFM_ATTR_EMPTY;
	} else {
		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
		    NVME_FWVER_SZ);
		fw_ver[NVME_FWVER_SZ] = '\0';
		ddi_ufm_slot_set_version(slot, fw_ver);
	}
	mutex_exit(&nvme->n_fwslot_mutex);

	ddi_ufm_slot_set_attrs(slot, attr);

	return (0);
}

static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}
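
/*
 * These three functions form the driver's UFM callbacks. They are assumed to
 * be registered with the DDI UFM framework elsewhere in the driver (during
 * attach, e.g. via ddi_ufm_init(9F) and a ddi_ufm_ops vector referencing
 * nvme_ufm_fill_image(), nvme_ufm_fill_slot() and nvme_ufm_getcaps()); that
 * registration is not shown here.
 */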