1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2016 Avago Technologies. All rights reserved. 4 */ 5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 6 #include <linux/module.h> 7 #include <linux/parser.h> 8 #include <uapi/scsi/fc/fc_fs.h> 9 #include <uapi/scsi/fc/fc_els.h> 10 #include <linux/delay.h> 11 #include <linux/overflow.h> 12 13 #include "nvme.h" 14 #include "fabrics.h" 15 #include <linux/nvme-fc-driver.h> 16 #include <linux/nvme-fc.h> 17 #include "fc.h" 18 #include <scsi/scsi_transport_fc.h> 19 20 /* *************************** Data Structures/Defines ****************** */ 21 22 23 enum nvme_fc_queue_flags { 24 NVME_FC_Q_CONNECTED = 0, 25 NVME_FC_Q_LIVE, 26 }; 27 28 #define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */ 29 30 struct nvme_fc_queue { 31 struct nvme_fc_ctrl *ctrl; 32 struct device *dev; 33 struct blk_mq_hw_ctx *hctx; 34 void *lldd_handle; 35 size_t cmnd_capsule_len; 36 u32 qnum; 37 u32 rqcnt; 38 u32 seqno; 39 40 u64 connection_id; 41 atomic_t csn; 42 43 unsigned long flags; 44 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 45 46 enum nvme_fcop_flags { 47 FCOP_FLAGS_TERMIO = (1 << 0), 48 FCOP_FLAGS_AEN = (1 << 1), 49 }; 50 51 struct nvmefc_ls_req_op { 52 struct nvmefc_ls_req ls_req; 53 54 struct nvme_fc_rport *rport; 55 struct nvme_fc_queue *queue; 56 struct request *rq; 57 u32 flags; 58 59 int ls_error; 60 struct completion ls_done; 61 struct list_head lsreq_list; /* rport->ls_req_list */ 62 bool req_queued; 63 }; 64 65 struct nvmefc_ls_rcv_op { 66 struct nvme_fc_rport *rport; 67 struct nvmefc_ls_rsp *lsrsp; 68 union nvmefc_ls_requests *rqstbuf; 69 union nvmefc_ls_responses *rspbuf; 70 u16 rqstdatalen; 71 bool handled; 72 dma_addr_t rspdma; 73 struct list_head lsrcv_list; /* rport->ls_rcv_list */ 74 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 75 76 enum nvme_fcpop_state { 77 FCPOP_STATE_UNINIT = 0, 78 FCPOP_STATE_IDLE = 1, 79 FCPOP_STATE_ACTIVE = 2, 80 FCPOP_STATE_ABORTED = 3, 81 FCPOP_STATE_COMPLETE = 4, 82 }; 83 84 struct nvme_fc_fcp_op { 85 struct nvme_request nreq; /* 86 * nvme/host/core.c 87 * requires this to be 88 * the 1st element in the 89 * private structure 90 * associated with the 91 * request. 92 */ 93 struct nvmefc_fcp_req fcp_req; 94 95 struct nvme_fc_ctrl *ctrl; 96 struct nvme_fc_queue *queue; 97 struct request *rq; 98 99 atomic_t state; 100 u32 flags; 101 u32 rqno; 102 u32 nents; 103 104 struct nvme_fc_cmd_iu cmd_iu; 105 struct nvme_fc_ersp_iu rsp_iu; 106 }; 107 108 struct nvme_fcp_op_w_sgl { 109 struct nvme_fc_fcp_op op; 110 struct scatterlist sgl[NVME_INLINE_SG_CNT]; 111 uint8_t priv[]; 112 }; 113 114 struct nvme_fc_lport { 115 struct nvme_fc_local_port localport; 116 117 struct ida endp_cnt; 118 struct list_head port_list; /* nvme_fc_port_list */ 119 struct list_head endp_list; 120 struct device *dev; /* physical device for dma */ 121 struct nvme_fc_port_template *ops; 122 struct kref ref; 123 atomic_t act_rport_cnt; 124 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 125 126 struct nvme_fc_rport { 127 struct nvme_fc_remote_port remoteport; 128 129 struct list_head endp_list; /* for lport->endp_list */ 130 struct list_head ctrl_list; 131 struct list_head ls_req_list; 132 struct list_head ls_rcv_list; 133 struct list_head disc_list; 134 struct device *dev; /* physical device for dma */ 135 struct nvme_fc_lport *lport; 136 spinlock_t lock; 137 struct kref ref; 138 atomic_t act_ctrl_cnt; 139 unsigned long dev_loss_end; 140 struct work_struct lsrcv_work; 141 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ 142 143 /* fc_ctrl flags values - specified as bit positions */ 144 #define ASSOC_ACTIVE 0 145 #define FCCTRL_TERMIO 1 146 147 struct nvme_fc_ctrl { 148 spinlock_t lock; 149 struct nvme_fc_queue *queues; 150 struct device *dev; 151 struct nvme_fc_lport *lport; 152 struct nvme_fc_rport *rport; 153 u32 cnum; 154 155 bool ioq_live; 156 atomic_t err_work_active; 157 u64 association_id; 158 struct nvmefc_ls_rcv_op *rcv_disconn; 159 160 struct list_head ctrl_list; /* rport->ctrl_list */ 161 162 struct blk_mq_tag_set admin_tag_set; 163 struct blk_mq_tag_set tag_set; 164 165 struct delayed_work connect_work; 166 struct work_struct err_work; 167 168 struct kref ref; 169 unsigned long flags; 170 u32 iocnt; 171 wait_queue_head_t ioabort_wait; 172 173 struct nvme_fc_fcp_op aen_ops[NVME_NR_AEN_COMMANDS]; 174 175 struct nvme_ctrl ctrl; 176 }; 177 178 static inline struct nvme_fc_ctrl * 179 to_fc_ctrl(struct nvme_ctrl *ctrl) 180 { 181 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); 182 } 183 184 static inline struct nvme_fc_lport * 185 localport_to_lport(struct nvme_fc_local_port *portptr) 186 { 187 return container_of(portptr, struct nvme_fc_lport, localport); 188 } 189 190 static inline struct nvme_fc_rport * 191 remoteport_to_rport(struct nvme_fc_remote_port *portptr) 192 { 193 return container_of(portptr, struct nvme_fc_rport, remoteport); 194 } 195 196 static inline struct nvmefc_ls_req_op * 197 ls_req_to_lsop(struct nvmefc_ls_req *lsreq) 198 { 199 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req); 200 } 201 202 static inline struct nvme_fc_fcp_op * 203 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq) 204 { 205 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req); 206 } 207 208 209 210 /* *************************** Globals **************************** */ 211 212 213 static DEFINE_SPINLOCK(nvme_fc_lock); 214 215 static LIST_HEAD(nvme_fc_lport_list); 216 static DEFINE_IDA(nvme_fc_local_port_cnt); 217 static DEFINE_IDA(nvme_fc_ctrl_cnt); 218 219 static struct workqueue_struct *nvme_fc_wq; 220 221 static bool nvme_fc_waiting_to_unload; 222 static DECLARE_COMPLETION(nvme_fc_unload_proceed); 223 224 /* 225 * These items are short-term. They will eventually be moved into 226 * a generic FC class. See comments in module init. 227 */ 228 static struct device *fc_udev_device; 229 230 static void nvme_fc_complete_rq(struct request *rq); 231 232 /* *********************** FC-NVME Port Management ************************ */ 233 234 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *, 235 struct nvme_fc_queue *, unsigned int); 236 237 static void nvme_fc_handle_ls_rqst_work(struct work_struct *work); 238 239 240 static void 241 nvme_fc_free_lport(struct kref *ref) 242 { 243 struct nvme_fc_lport *lport = 244 container_of(ref, struct nvme_fc_lport, ref); 245 unsigned long flags; 246 247 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); 248 WARN_ON(!list_empty(&lport->endp_list)); 249 250 /* remove from transport list */ 251 spin_lock_irqsave(&nvme_fc_lock, flags); 252 list_del(&lport->port_list); 253 if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) 254 complete(&nvme_fc_unload_proceed); 255 spin_unlock_irqrestore(&nvme_fc_lock, flags); 256 257 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); 258 ida_destroy(&lport->endp_cnt); 259 260 put_device(lport->dev); 261 262 kfree(lport); 263 } 264 265 static void 266 nvme_fc_lport_put(struct nvme_fc_lport *lport) 267 { 268 kref_put(&lport->ref, nvme_fc_free_lport); 269 } 270 271 static int 272 nvme_fc_lport_get(struct nvme_fc_lport *lport) 273 { 274 return kref_get_unless_zero(&lport->ref); 275 } 276 277 278 static struct nvme_fc_lport * 279 nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo, 280 struct nvme_fc_port_template *ops, 281 struct device *dev) 282 { 283 struct nvme_fc_lport *lport; 284 unsigned long flags; 285 286 spin_lock_irqsave(&nvme_fc_lock, flags); 287 288 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 289 if (lport->localport.node_name != pinfo->node_name || 290 lport->localport.port_name != pinfo->port_name) 291 continue; 292 293 if (lport->dev != dev) { 294 lport = ERR_PTR(-EXDEV); 295 goto out_done; 296 } 297 298 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { 299 lport = ERR_PTR(-EEXIST); 300 goto out_done; 301 } 302 303 if (!nvme_fc_lport_get(lport)) { 304 /* 305 * fails if ref cnt already 0. If so, 306 * act as if lport already deleted 307 */ 308 lport = NULL; 309 goto out_done; 310 } 311 312 /* resume the lport */ 313 314 lport->ops = ops; 315 lport->localport.port_role = pinfo->port_role; 316 lport->localport.port_id = pinfo->port_id; 317 lport->localport.port_state = FC_OBJSTATE_ONLINE; 318 319 spin_unlock_irqrestore(&nvme_fc_lock, flags); 320 321 return lport; 322 } 323 324 lport = NULL; 325 326 out_done: 327 spin_unlock_irqrestore(&nvme_fc_lock, flags); 328 329 return lport; 330 } 331 332 /** 333 * nvme_fc_register_localport - transport entry point called by an 334 * LLDD to register the existence of a NVME 335 * host FC port. 336 * @pinfo: pointer to information about the port to be registered 337 * @template: LLDD entrypoints and operational parameters for the port 338 * @dev: physical hardware device node port corresponds to. Will be 339 * used for DMA mappings 340 * @portptr: pointer to a local port pointer. Upon success, the routine 341 * will allocate a nvme_fc_local_port structure and place its 342 * address in the local port pointer. Upon failure, local port 343 * pointer will be set to 0. 344 * 345 * Returns: 346 * a completion status. Must be 0 upon success; a negative errno 347 * (ex: -ENXIO) upon failure. 348 */ 349 int 350 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo, 351 struct nvme_fc_port_template *template, 352 struct device *dev, 353 struct nvme_fc_local_port **portptr) 354 { 355 struct nvme_fc_lport *newrec; 356 unsigned long flags; 357 int ret, idx; 358 359 if (!template->localport_delete || !template->remoteport_delete || 360 !template->ls_req || !template->fcp_io || 361 !template->ls_abort || !template->fcp_abort || 362 !template->max_hw_queues || !template->max_sgl_segments || 363 !template->max_dif_sgl_segments || !template->dma_boundary) { 364 ret = -EINVAL; 365 goto out_reghost_failed; 366 } 367 368 /* 369 * look to see if there is already a localport that had been 370 * deregistered and in the process of waiting for all the 371 * references to fully be removed. If the references haven't 372 * expired, we can simply re-enable the localport. Remoteports 373 * and controller reconnections should resume naturally. 374 */ 375 newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev); 376 377 /* found an lport, but something about its state is bad */ 378 if (IS_ERR(newrec)) { 379 ret = PTR_ERR(newrec); 380 goto out_reghost_failed; 381 382 /* found existing lport, which was resumed */ 383 } else if (newrec) { 384 *portptr = &newrec->localport; 385 return 0; 386 } 387 388 /* nothing found - allocate a new localport struct */ 389 390 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), 391 GFP_KERNEL); 392 if (!newrec) { 393 ret = -ENOMEM; 394 goto out_reghost_failed; 395 } 396 397 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL); 398 if (idx < 0) { 399 ret = -ENOSPC; 400 goto out_fail_kfree; 401 } 402 403 if (!get_device(dev) && dev) { 404 ret = -ENODEV; 405 goto out_ida_put; 406 } 407 408 INIT_LIST_HEAD(&newrec->port_list); 409 INIT_LIST_HEAD(&newrec->endp_list); 410 kref_init(&newrec->ref); 411 atomic_set(&newrec->act_rport_cnt, 0); 412 newrec->ops = template; 413 newrec->dev = dev; 414 ida_init(&newrec->endp_cnt); 415 if (template->local_priv_sz) 416 newrec->localport.private = &newrec[1]; 417 else 418 newrec->localport.private = NULL; 419 newrec->localport.node_name = pinfo->node_name; 420 newrec->localport.port_name = pinfo->port_name; 421 newrec->localport.port_role = pinfo->port_role; 422 newrec->localport.port_id = pinfo->port_id; 423 newrec->localport.port_state = FC_OBJSTATE_ONLINE; 424 newrec->localport.port_num = idx; 425 426 spin_lock_irqsave(&nvme_fc_lock, flags); 427 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); 428 spin_unlock_irqrestore(&nvme_fc_lock, flags); 429 430 if (dev) 431 dma_set_seg_boundary(dev, template->dma_boundary); 432 433 *portptr = &newrec->localport; 434 return 0; 435 436 out_ida_put: 437 ida_simple_remove(&nvme_fc_local_port_cnt, idx); 438 out_fail_kfree: 439 kfree(newrec); 440 out_reghost_failed: 441 *portptr = NULL; 442 443 return ret; 444 } 445 EXPORT_SYMBOL_GPL(nvme_fc_register_localport); 446 447 /** 448 * nvme_fc_unregister_localport - transport entry point called by an 449 * LLDD to deregister/remove a previously 450 * registered a NVME host FC port. 451 * @portptr: pointer to the (registered) local port that is to be deregistered. 452 * 453 * Returns: 454 * a completion status. Must be 0 upon success; a negative errno 455 * (ex: -ENXIO) upon failure. 456 */ 457 int 458 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr) 459 { 460 struct nvme_fc_lport *lport = localport_to_lport(portptr); 461 unsigned long flags; 462 463 if (!portptr) 464 return -EINVAL; 465 466 spin_lock_irqsave(&nvme_fc_lock, flags); 467 468 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 469 spin_unlock_irqrestore(&nvme_fc_lock, flags); 470 return -EINVAL; 471 } 472 portptr->port_state = FC_OBJSTATE_DELETED; 473 474 spin_unlock_irqrestore(&nvme_fc_lock, flags); 475 476 if (atomic_read(&lport->act_rport_cnt) == 0) 477 lport->ops->localport_delete(&lport->localport); 478 479 nvme_fc_lport_put(lport); 480 481 return 0; 482 } 483 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport); 484 485 /* 486 * TRADDR strings, per FC-NVME are fixed format: 487 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters 488 * udev event will only differ by prefix of what field is 489 * being specified: 490 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters 491 * 19 + 43 + null_fudge = 64 characters 492 */ 493 #define FCNVME_TRADDR_LENGTH 64 494 495 static void 496 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport, 497 struct nvme_fc_rport *rport) 498 { 499 char hostaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_HOST_TRADDR=...*/ 500 char tgtaddr[FCNVME_TRADDR_LENGTH]; /* NVMEFC_TRADDR=...*/ 501 char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL }; 502 503 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) 504 return; 505 506 snprintf(hostaddr, sizeof(hostaddr), 507 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", 508 lport->localport.node_name, lport->localport.port_name); 509 snprintf(tgtaddr, sizeof(tgtaddr), 510 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", 511 rport->remoteport.node_name, rport->remoteport.port_name); 512 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); 513 } 514 515 static void 516 nvme_fc_free_rport(struct kref *ref) 517 { 518 struct nvme_fc_rport *rport = 519 container_of(ref, struct nvme_fc_rport, ref); 520 struct nvme_fc_lport *lport = 521 localport_to_lport(rport->remoteport.localport); 522 unsigned long flags; 523 524 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); 525 WARN_ON(!list_empty(&rport->ctrl_list)); 526 527 /* remove from lport list */ 528 spin_lock_irqsave(&nvme_fc_lock, flags); 529 list_del(&rport->endp_list); 530 spin_unlock_irqrestore(&nvme_fc_lock, flags); 531 532 WARN_ON(!list_empty(&rport->disc_list)); 533 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); 534 535 kfree(rport); 536 537 nvme_fc_lport_put(lport); 538 } 539 540 static void 541 nvme_fc_rport_put(struct nvme_fc_rport *rport) 542 { 543 kref_put(&rport->ref, nvme_fc_free_rport); 544 } 545 546 static int 547 nvme_fc_rport_get(struct nvme_fc_rport *rport) 548 { 549 return kref_get_unless_zero(&rport->ref); 550 } 551 552 static void 553 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) 554 { 555 switch (ctrl->ctrl.state) { 556 case NVME_CTRL_NEW: 557 case NVME_CTRL_CONNECTING: 558 /* 559 * As all reconnects were suppressed, schedule a 560 * connect. 561 */ 562 dev_info(ctrl->ctrl.device, 563 "NVME-FC{%d}: connectivity re-established. " 564 "Attempting reconnect\n", ctrl->cnum); 565 566 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); 567 break; 568 569 case NVME_CTRL_RESETTING: 570 /* 571 * Controller is already in the process of terminating the 572 * association. No need to do anything further. The reconnect 573 * step will naturally occur after the reset completes. 574 */ 575 break; 576 577 default: 578 /* no action to take - let it delete */ 579 break; 580 } 581 } 582 583 static struct nvme_fc_rport * 584 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport, 585 struct nvme_fc_port_info *pinfo) 586 { 587 struct nvme_fc_rport *rport; 588 struct nvme_fc_ctrl *ctrl; 589 unsigned long flags; 590 591 spin_lock_irqsave(&nvme_fc_lock, flags); 592 593 list_for_each_entry(rport, &lport->endp_list, endp_list) { 594 if (rport->remoteport.node_name != pinfo->node_name || 595 rport->remoteport.port_name != pinfo->port_name) 596 continue; 597 598 if (!nvme_fc_rport_get(rport)) { 599 rport = ERR_PTR(-ENOLCK); 600 goto out_done; 601 } 602 603 spin_unlock_irqrestore(&nvme_fc_lock, flags); 604 605 spin_lock_irqsave(&rport->lock, flags); 606 607 /* has it been unregistered */ 608 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { 609 /* means lldd called us twice */ 610 spin_unlock_irqrestore(&rport->lock, flags); 611 nvme_fc_rport_put(rport); 612 return ERR_PTR(-ESTALE); 613 } 614 615 rport->remoteport.port_role = pinfo->port_role; 616 rport->remoteport.port_id = pinfo->port_id; 617 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; 618 rport->dev_loss_end = 0; 619 620 /* 621 * kick off a reconnect attempt on all associations to the 622 * remote port. A successful reconnects will resume i/o. 623 */ 624 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) 625 nvme_fc_resume_controller(ctrl); 626 627 spin_unlock_irqrestore(&rport->lock, flags); 628 629 return rport; 630 } 631 632 rport = NULL; 633 634 out_done: 635 spin_unlock_irqrestore(&nvme_fc_lock, flags); 636 637 return rport; 638 } 639 640 static inline void 641 __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport, 642 struct nvme_fc_port_info *pinfo) 643 { 644 if (pinfo->dev_loss_tmo) 645 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; 646 else 647 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; 648 } 649 650 /** 651 * nvme_fc_register_remoteport - transport entry point called by an 652 * LLDD to register the existence of a NVME 653 * subsystem FC port on its fabric. 654 * @localport: pointer to the (registered) local port that the remote 655 * subsystem port is connected to. 656 * @pinfo: pointer to information about the port to be registered 657 * @portptr: pointer to a remote port pointer. Upon success, the routine 658 * will allocate a nvme_fc_remote_port structure and place its 659 * address in the remote port pointer. Upon failure, remote port 660 * pointer will be set to 0. 661 * 662 * Returns: 663 * a completion status. Must be 0 upon success; a negative errno 664 * (ex: -ENXIO) upon failure. 665 */ 666 int 667 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport, 668 struct nvme_fc_port_info *pinfo, 669 struct nvme_fc_remote_port **portptr) 670 { 671 struct nvme_fc_lport *lport = localport_to_lport(localport); 672 struct nvme_fc_rport *newrec; 673 unsigned long flags; 674 int ret, idx; 675 676 if (!nvme_fc_lport_get(lport)) { 677 ret = -ESHUTDOWN; 678 goto out_reghost_failed; 679 } 680 681 /* 682 * look to see if there is already a remoteport that is waiting 683 * for a reconnect (within dev_loss_tmo) with the same WWN's. 684 * If so, transition to it and reconnect. 685 */ 686 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo); 687 688 /* found an rport, but something about its state is bad */ 689 if (IS_ERR(newrec)) { 690 ret = PTR_ERR(newrec); 691 goto out_lport_put; 692 693 /* found existing rport, which was resumed */ 694 } else if (newrec) { 695 nvme_fc_lport_put(lport); 696 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 697 nvme_fc_signal_discovery_scan(lport, newrec); 698 *portptr = &newrec->remoteport; 699 return 0; 700 } 701 702 /* nothing found - allocate a new remoteport struct */ 703 704 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), 705 GFP_KERNEL); 706 if (!newrec) { 707 ret = -ENOMEM; 708 goto out_lport_put; 709 } 710 711 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); 712 if (idx < 0) { 713 ret = -ENOSPC; 714 goto out_kfree_rport; 715 } 716 717 INIT_LIST_HEAD(&newrec->endp_list); 718 INIT_LIST_HEAD(&newrec->ctrl_list); 719 INIT_LIST_HEAD(&newrec->ls_req_list); 720 INIT_LIST_HEAD(&newrec->disc_list); 721 kref_init(&newrec->ref); 722 atomic_set(&newrec->act_ctrl_cnt, 0); 723 spin_lock_init(&newrec->lock); 724 newrec->remoteport.localport = &lport->localport; 725 INIT_LIST_HEAD(&newrec->ls_rcv_list); 726 newrec->dev = lport->dev; 727 newrec->lport = lport; 728 if (lport->ops->remote_priv_sz) 729 newrec->remoteport.private = &newrec[1]; 730 else 731 newrec->remoteport.private = NULL; 732 newrec->remoteport.port_role = pinfo->port_role; 733 newrec->remoteport.node_name = pinfo->node_name; 734 newrec->remoteport.port_name = pinfo->port_name; 735 newrec->remoteport.port_id = pinfo->port_id; 736 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; 737 newrec->remoteport.port_num = idx; 738 __nvme_fc_set_dev_loss_tmo(newrec, pinfo); 739 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); 740 741 spin_lock_irqsave(&nvme_fc_lock, flags); 742 list_add_tail(&newrec->endp_list, &lport->endp_list); 743 spin_unlock_irqrestore(&nvme_fc_lock, flags); 744 745 nvme_fc_signal_discovery_scan(lport, newrec); 746 747 *portptr = &newrec->remoteport; 748 return 0; 749 750 out_kfree_rport: 751 kfree(newrec); 752 out_lport_put: 753 nvme_fc_lport_put(lport); 754 out_reghost_failed: 755 *portptr = NULL; 756 return ret; 757 } 758 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport); 759 760 static int 761 nvme_fc_abort_lsops(struct nvme_fc_rport *rport) 762 { 763 struct nvmefc_ls_req_op *lsop; 764 unsigned long flags; 765 766 restart: 767 spin_lock_irqsave(&rport->lock, flags); 768 769 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { 770 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { 771 lsop->flags |= FCOP_FLAGS_TERMIO; 772 spin_unlock_irqrestore(&rport->lock, flags); 773 rport->lport->ops->ls_abort(&rport->lport->localport, 774 &rport->remoteport, 775 &lsop->ls_req); 776 goto restart; 777 } 778 } 779 spin_unlock_irqrestore(&rport->lock, flags); 780 781 return 0; 782 } 783 784 static void 785 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) 786 { 787 dev_info(ctrl->ctrl.device, 788 "NVME-FC{%d}: controller connectivity lost. Awaiting " 789 "Reconnect", ctrl->cnum); 790 791 switch (ctrl->ctrl.state) { 792 case NVME_CTRL_NEW: 793 case NVME_CTRL_LIVE: 794 /* 795 * Schedule a controller reset. The reset will terminate the 796 * association and schedule the reconnect timer. Reconnects 797 * will be attempted until either the ctlr_loss_tmo 798 * (max_retries * connect_delay) expires or the remoteport's 799 * dev_loss_tmo expires. 800 */ 801 if (nvme_reset_ctrl(&ctrl->ctrl)) { 802 dev_warn(ctrl->ctrl.device, 803 "NVME-FC{%d}: Couldn't schedule reset.\n", 804 ctrl->cnum); 805 nvme_delete_ctrl(&ctrl->ctrl); 806 } 807 break; 808 809 case NVME_CTRL_CONNECTING: 810 /* 811 * The association has already been terminated and the 812 * controller is attempting reconnects. No need to do anything 813 * futher. Reconnects will be attempted until either the 814 * ctlr_loss_tmo (max_retries * connect_delay) expires or the 815 * remoteport's dev_loss_tmo expires. 816 */ 817 break; 818 819 case NVME_CTRL_RESETTING: 820 /* 821 * Controller is already in the process of terminating the 822 * association. No need to do anything further. The reconnect 823 * step will kick in naturally after the association is 824 * terminated. 825 */ 826 break; 827 828 case NVME_CTRL_DELETING: 829 default: 830 /* no action to take - let it delete */ 831 break; 832 } 833 } 834 835 /** 836 * nvme_fc_unregister_remoteport - transport entry point called by an 837 * LLDD to deregister/remove a previously 838 * registered a NVME subsystem FC port. 839 * @portptr: pointer to the (registered) remote port that is to be 840 * deregistered. 841 * 842 * Returns: 843 * a completion status. Must be 0 upon success; a negative errno 844 * (ex: -ENXIO) upon failure. 845 */ 846 int 847 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr) 848 { 849 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 850 struct nvme_fc_ctrl *ctrl; 851 unsigned long flags; 852 853 if (!portptr) 854 return -EINVAL; 855 856 spin_lock_irqsave(&rport->lock, flags); 857 858 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 859 spin_unlock_irqrestore(&rport->lock, flags); 860 return -EINVAL; 861 } 862 portptr->port_state = FC_OBJSTATE_DELETED; 863 864 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); 865 866 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 867 /* if dev_loss_tmo==0, dev loss is immediate */ 868 if (!portptr->dev_loss_tmo) { 869 dev_warn(ctrl->ctrl.device, 870 "NVME-FC{%d}: controller connectivity lost.\n", 871 ctrl->cnum); 872 nvme_delete_ctrl(&ctrl->ctrl); 873 } else 874 nvme_fc_ctrl_connectivity_loss(ctrl); 875 } 876 877 spin_unlock_irqrestore(&rport->lock, flags); 878 879 nvme_fc_abort_lsops(rport); 880 881 if (atomic_read(&rport->act_ctrl_cnt) == 0) 882 rport->lport->ops->remoteport_delete(portptr); 883 884 /* 885 * release the reference, which will allow, if all controllers 886 * go away, which should only occur after dev_loss_tmo occurs, 887 * for the rport to be torn down. 888 */ 889 nvme_fc_rport_put(rport); 890 891 return 0; 892 } 893 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport); 894 895 /** 896 * nvme_fc_rescan_remoteport - transport entry point called by an 897 * LLDD to request a nvme device rescan. 898 * @remoteport: pointer to the (registered) remote port that is to be 899 * rescanned. 900 * 901 * Returns: N/A 902 */ 903 void 904 nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport) 905 { 906 struct nvme_fc_rport *rport = remoteport_to_rport(remoteport); 907 908 nvme_fc_signal_discovery_scan(rport->lport, rport); 909 } 910 EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport); 911 912 int 913 nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr, 914 u32 dev_loss_tmo) 915 { 916 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 917 unsigned long flags; 918 919 spin_lock_irqsave(&rport->lock, flags); 920 921 if (portptr->port_state != FC_OBJSTATE_ONLINE) { 922 spin_unlock_irqrestore(&rport->lock, flags); 923 return -EINVAL; 924 } 925 926 /* a dev_loss_tmo of 0 (immediate) is allowed to be set */ 927 rport->remoteport.dev_loss_tmo = dev_loss_tmo; 928 929 spin_unlock_irqrestore(&rport->lock, flags); 930 931 return 0; 932 } 933 EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); 934 935 936 /* *********************** FC-NVME DMA Handling **************************** */ 937 938 /* 939 * The fcloop device passes in a NULL device pointer. Real LLD's will 940 * pass in a valid device pointer. If NULL is passed to the dma mapping 941 * routines, depending on the platform, it may or may not succeed, and 942 * may crash. 943 * 944 * As such: 945 * Wrapper all the dma routines and check the dev pointer. 946 * 947 * If simple mappings (return just a dma address, we'll noop them, 948 * returning a dma address of 0. 949 * 950 * On more complex mappings (dma_map_sg), a pseudo routine fills 951 * in the scatter list, setting all dma addresses to 0. 952 */ 953 954 static inline dma_addr_t 955 fc_dma_map_single(struct device *dev, void *ptr, size_t size, 956 enum dma_data_direction dir) 957 { 958 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L; 959 } 960 961 static inline int 962 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 963 { 964 return dev ? dma_mapping_error(dev, dma_addr) : 0; 965 } 966 967 static inline void 968 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size, 969 enum dma_data_direction dir) 970 { 971 if (dev) 972 dma_unmap_single(dev, addr, size, dir); 973 } 974 975 static inline void 976 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, 977 enum dma_data_direction dir) 978 { 979 if (dev) 980 dma_sync_single_for_cpu(dev, addr, size, dir); 981 } 982 983 static inline void 984 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, 985 enum dma_data_direction dir) 986 { 987 if (dev) 988 dma_sync_single_for_device(dev, addr, size, dir); 989 } 990 991 /* pseudo dma_map_sg call */ 992 static int 993 fc_map_sg(struct scatterlist *sg, int nents) 994 { 995 struct scatterlist *s; 996 int i; 997 998 WARN_ON(nents == 0 || sg[0].length == 0); 999 1000 for_each_sg(sg, s, nents, i) { 1001 s->dma_address = 0L; 1002 #ifdef CONFIG_NEED_SG_DMA_LENGTH 1003 s->dma_length = s->length; 1004 #endif 1005 } 1006 return nents; 1007 } 1008 1009 static inline int 1010 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1011 enum dma_data_direction dir) 1012 { 1013 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 1014 } 1015 1016 static inline void 1017 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1018 enum dma_data_direction dir) 1019 { 1020 if (dev) 1021 dma_unmap_sg(dev, sg, nents, dir); 1022 } 1023 1024 /* *********************** FC-NVME LS Handling **************************** */ 1025 1026 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); 1027 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); 1028 1029 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); 1030 1031 static void 1032 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) 1033 { 1034 struct nvme_fc_rport *rport = lsop->rport; 1035 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1036 unsigned long flags; 1037 1038 spin_lock_irqsave(&rport->lock, flags); 1039 1040 if (!lsop->req_queued) { 1041 spin_unlock_irqrestore(&rport->lock, flags); 1042 return; 1043 } 1044 1045 list_del(&lsop->lsreq_list); 1046 1047 lsop->req_queued = false; 1048 1049 spin_unlock_irqrestore(&rport->lock, flags); 1050 1051 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1052 (lsreq->rqstlen + lsreq->rsplen), 1053 DMA_BIDIRECTIONAL); 1054 1055 nvme_fc_rport_put(rport); 1056 } 1057 1058 static int 1059 __nvme_fc_send_ls_req(struct nvme_fc_rport *rport, 1060 struct nvmefc_ls_req_op *lsop, 1061 void (*done)(struct nvmefc_ls_req *req, int status)) 1062 { 1063 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1064 unsigned long flags; 1065 int ret = 0; 1066 1067 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 1068 return -ECONNREFUSED; 1069 1070 if (!nvme_fc_rport_get(rport)) 1071 return -ESHUTDOWN; 1072 1073 lsreq->done = done; 1074 lsop->rport = rport; 1075 lsop->req_queued = false; 1076 INIT_LIST_HEAD(&lsop->lsreq_list); 1077 init_completion(&lsop->ls_done); 1078 1079 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, 1080 lsreq->rqstlen + lsreq->rsplen, 1081 DMA_BIDIRECTIONAL); 1082 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { 1083 ret = -EFAULT; 1084 goto out_putrport; 1085 } 1086 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 1087 1088 spin_lock_irqsave(&rport->lock, flags); 1089 1090 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); 1091 1092 lsop->req_queued = true; 1093 1094 spin_unlock_irqrestore(&rport->lock, flags); 1095 1096 ret = rport->lport->ops->ls_req(&rport->lport->localport, 1097 &rport->remoteport, lsreq); 1098 if (ret) 1099 goto out_unlink; 1100 1101 return 0; 1102 1103 out_unlink: 1104 lsop->ls_error = ret; 1105 spin_lock_irqsave(&rport->lock, flags); 1106 lsop->req_queued = false; 1107 list_del(&lsop->lsreq_list); 1108 spin_unlock_irqrestore(&rport->lock, flags); 1109 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 1110 (lsreq->rqstlen + lsreq->rsplen), 1111 DMA_BIDIRECTIONAL); 1112 out_putrport: 1113 nvme_fc_rport_put(rport); 1114 1115 return ret; 1116 } 1117 1118 static void 1119 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) 1120 { 1121 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1122 1123 lsop->ls_error = status; 1124 complete(&lsop->ls_done); 1125 } 1126 1127 static int 1128 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) 1129 { 1130 struct nvmefc_ls_req *lsreq = &lsop->ls_req; 1131 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; 1132 int ret; 1133 1134 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); 1135 1136 if (!ret) { 1137 /* 1138 * No timeout/not interruptible as we need the struct 1139 * to exist until the lldd calls us back. Thus mandate 1140 * wait until driver calls back. lldd responsible for 1141 * the timeout action 1142 */ 1143 wait_for_completion(&lsop->ls_done); 1144 1145 __nvme_fc_finish_ls_req(lsop); 1146 1147 ret = lsop->ls_error; 1148 } 1149 1150 if (ret) 1151 return ret; 1152 1153 /* ACC or RJT payload ? */ 1154 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) 1155 return -ENXIO; 1156 1157 return 0; 1158 } 1159 1160 static int 1161 nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, 1162 struct nvmefc_ls_req_op *lsop, 1163 void (*done)(struct nvmefc_ls_req *req, int status)) 1164 { 1165 /* don't wait for completion */ 1166 1167 return __nvme_fc_send_ls_req(rport, lsop, done); 1168 } 1169 1170 static int 1171 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, 1172 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) 1173 { 1174 struct nvmefc_ls_req_op *lsop; 1175 struct nvmefc_ls_req *lsreq; 1176 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; 1177 struct fcnvme_ls_cr_assoc_acc *assoc_acc; 1178 unsigned long flags; 1179 int ret, fcret = 0; 1180 1181 lsop = kzalloc((sizeof(*lsop) + 1182 sizeof(*assoc_rqst) + sizeof(*assoc_acc) + 1183 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1184 if (!lsop) { 1185 dev_info(ctrl->ctrl.device, 1186 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", 1187 ctrl->cnum); 1188 ret = -ENOMEM; 1189 goto out_no_memory; 1190 } 1191 1192 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; 1193 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; 1194 lsreq = &lsop->ls_req; 1195 if (ctrl->lport->ops->lsrqst_priv_sz) 1196 lsreq->private = &assoc_acc[1]; 1197 else 1198 lsreq->private = NULL; 1199 1200 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; 1201 assoc_rqst->desc_list_len = 1202 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1203 1204 assoc_rqst->assoc_cmd.desc_tag = 1205 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); 1206 assoc_rqst->assoc_cmd.desc_len = 1207 fcnvme_lsdesc_len( 1208 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 1209 1210 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1211 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); 1212 /* Linux supports only Dynamic controllers */ 1213 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); 1214 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); 1215 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, 1216 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); 1217 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, 1218 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); 1219 1220 lsop->queue = queue; 1221 lsreq->rqstaddr = assoc_rqst; 1222 lsreq->rqstlen = sizeof(*assoc_rqst); 1223 lsreq->rspaddr = assoc_acc; 1224 lsreq->rsplen = sizeof(*assoc_acc); 1225 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1226 1227 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1228 if (ret) 1229 goto out_free_buffer; 1230 1231 /* process connect LS completion */ 1232 1233 /* validate the ACC response */ 1234 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1235 fcret = VERR_LSACC; 1236 else if (assoc_acc->hdr.desc_list_len != 1237 fcnvme_lsdesc_len( 1238 sizeof(struct fcnvme_ls_cr_assoc_acc))) 1239 fcret = VERR_CR_ASSOC_ACC_LEN; 1240 else if (assoc_acc->hdr.rqst.desc_tag != 1241 cpu_to_be32(FCNVME_LSDESC_RQST)) 1242 fcret = VERR_LSDESC_RQST; 1243 else if (assoc_acc->hdr.rqst.desc_len != 1244 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1245 fcret = VERR_LSDESC_RQST_LEN; 1246 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) 1247 fcret = VERR_CR_ASSOC; 1248 else if (assoc_acc->associd.desc_tag != 1249 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 1250 fcret = VERR_ASSOC_ID; 1251 else if (assoc_acc->associd.desc_len != 1252 fcnvme_lsdesc_len( 1253 sizeof(struct fcnvme_lsdesc_assoc_id))) 1254 fcret = VERR_ASSOC_ID_LEN; 1255 else if (assoc_acc->connectid.desc_tag != 1256 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1257 fcret = VERR_CONN_ID; 1258 else if (assoc_acc->connectid.desc_len != 1259 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1260 fcret = VERR_CONN_ID_LEN; 1261 1262 if (fcret) { 1263 ret = -EBADF; 1264 dev_err(ctrl->dev, 1265 "q %d Create Association LS failed: %s\n", 1266 queue->qnum, validation_errors[fcret]); 1267 } else { 1268 spin_lock_irqsave(&ctrl->lock, flags); 1269 ctrl->association_id = 1270 be64_to_cpu(assoc_acc->associd.association_id); 1271 queue->connection_id = 1272 be64_to_cpu(assoc_acc->connectid.connection_id); 1273 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1274 spin_unlock_irqrestore(&ctrl->lock, flags); 1275 } 1276 1277 out_free_buffer: 1278 kfree(lsop); 1279 out_no_memory: 1280 if (ret) 1281 dev_err(ctrl->dev, 1282 "queue %d connect admin queue failed (%d).\n", 1283 queue->qnum, ret); 1284 return ret; 1285 } 1286 1287 static int 1288 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 1289 u16 qsize, u16 ersp_ratio) 1290 { 1291 struct nvmefc_ls_req_op *lsop; 1292 struct nvmefc_ls_req *lsreq; 1293 struct fcnvme_ls_cr_conn_rqst *conn_rqst; 1294 struct fcnvme_ls_cr_conn_acc *conn_acc; 1295 int ret, fcret = 0; 1296 1297 lsop = kzalloc((sizeof(*lsop) + 1298 sizeof(*conn_rqst) + sizeof(*conn_acc) + 1299 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1300 if (!lsop) { 1301 dev_info(ctrl->ctrl.device, 1302 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", 1303 ctrl->cnum); 1304 ret = -ENOMEM; 1305 goto out_no_memory; 1306 } 1307 1308 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; 1309 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; 1310 lsreq = &lsop->ls_req; 1311 if (ctrl->lport->ops->lsrqst_priv_sz) 1312 lsreq->private = (void *)&conn_acc[1]; 1313 else 1314 lsreq->private = NULL; 1315 1316 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; 1317 conn_rqst->desc_list_len = cpu_to_be32( 1318 sizeof(struct fcnvme_lsdesc_assoc_id) + 1319 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1320 1321 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 1322 conn_rqst->associd.desc_len = 1323 fcnvme_lsdesc_len( 1324 sizeof(struct fcnvme_lsdesc_assoc_id)); 1325 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); 1326 conn_rqst->connect_cmd.desc_tag = 1327 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); 1328 conn_rqst->connect_cmd.desc_len = 1329 fcnvme_lsdesc_len( 1330 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 1331 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 1332 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); 1333 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); 1334 1335 lsop->queue = queue; 1336 lsreq->rqstaddr = conn_rqst; 1337 lsreq->rqstlen = sizeof(*conn_rqst); 1338 lsreq->rspaddr = conn_acc; 1339 lsreq->rsplen = sizeof(*conn_acc); 1340 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 1341 1342 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 1343 if (ret) 1344 goto out_free_buffer; 1345 1346 /* process connect LS completion */ 1347 1348 /* validate the ACC response */ 1349 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 1350 fcret = VERR_LSACC; 1351 else if (conn_acc->hdr.desc_list_len != 1352 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) 1353 fcret = VERR_CR_CONN_ACC_LEN; 1354 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) 1355 fcret = VERR_LSDESC_RQST; 1356 else if (conn_acc->hdr.rqst.desc_len != 1357 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 1358 fcret = VERR_LSDESC_RQST_LEN; 1359 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) 1360 fcret = VERR_CR_CONN; 1361 else if (conn_acc->connectid.desc_tag != 1362 cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 1363 fcret = VERR_CONN_ID; 1364 else if (conn_acc->connectid.desc_len != 1365 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 1366 fcret = VERR_CONN_ID_LEN; 1367 1368 if (fcret) { 1369 ret = -EBADF; 1370 dev_err(ctrl->dev, 1371 "q %d Create I/O Connection LS failed: %s\n", 1372 queue->qnum, validation_errors[fcret]); 1373 } else { 1374 queue->connection_id = 1375 be64_to_cpu(conn_acc->connectid.connection_id); 1376 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 1377 } 1378 1379 out_free_buffer: 1380 kfree(lsop); 1381 out_no_memory: 1382 if (ret) 1383 dev_err(ctrl->dev, 1384 "queue %d connect I/O queue failed (%d).\n", 1385 queue->qnum, ret); 1386 return ret; 1387 } 1388 1389 static void 1390 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 1391 { 1392 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 1393 1394 __nvme_fc_finish_ls_req(lsop); 1395 1396 /* fc-nvme initiator doesn't care about success or failure of cmd */ 1397 1398 kfree(lsop); 1399 } 1400 1401 /* 1402 * This routine sends a FC-NVME LS to disconnect (aka terminate) 1403 * the FC-NVME Association. Terminating the association also 1404 * terminates the FC-NVME connections (per queue, both admin and io 1405 * queues) that are part of the association. E.g. things are torn 1406 * down, and the related FC-NVME Association ID and Connection IDs 1407 * become invalid. 1408 * 1409 * The behavior of the fc-nvme initiator is such that it's 1410 * understanding of the association and connections will implicitly 1411 * be torn down. The action is implicit as it may be due to a loss of 1412 * connectivity with the fc-nvme target, so you may never get a 1413 * response even if you tried. As such, the action of this routine 1414 * is to asynchronously send the LS, ignore any results of the LS, and 1415 * continue on with terminating the association. If the fc-nvme target 1416 * is present and receives the LS, it too can tear down. 1417 */ 1418 static void 1419 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) 1420 { 1421 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst; 1422 struct fcnvme_ls_disconnect_assoc_acc *discon_acc; 1423 struct nvmefc_ls_req_op *lsop; 1424 struct nvmefc_ls_req *lsreq; 1425 int ret; 1426 1427 lsop = kzalloc((sizeof(*lsop) + 1428 sizeof(*discon_rqst) + sizeof(*discon_acc) + 1429 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 1430 if (!lsop) { 1431 dev_info(ctrl->ctrl.device, 1432 "NVME-FC{%d}: send Disconnect Association " 1433 "failed: ENOMEM\n", 1434 ctrl->cnum); 1435 return; 1436 } 1437 1438 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1]; 1439 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1]; 1440 lsreq = &lsop->ls_req; 1441 if (ctrl->lport->ops->lsrqst_priv_sz) 1442 lsreq->private = (void *)&discon_acc[1]; 1443 else 1444 lsreq->private = NULL; 1445 1446 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc, 1447 ctrl->association_id); 1448 1449 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, 1450 nvme_fc_disconnect_assoc_done); 1451 if (ret) 1452 kfree(lsop); 1453 } 1454 1455 static void 1456 nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) 1457 { 1458 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; 1459 struct nvme_fc_rport *rport = lsop->rport; 1460 struct nvme_fc_lport *lport = rport->lport; 1461 unsigned long flags; 1462 1463 spin_lock_irqsave(&rport->lock, flags); 1464 list_del(&lsop->lsrcv_list); 1465 spin_unlock_irqrestore(&rport->lock, flags); 1466 1467 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, 1468 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1469 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1470 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1471 1472 kfree(lsop); 1473 1474 nvme_fc_rport_put(rport); 1475 } 1476 1477 static void 1478 nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) 1479 { 1480 struct nvme_fc_rport *rport = lsop->rport; 1481 struct nvme_fc_lport *lport = rport->lport; 1482 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1483 int ret; 1484 1485 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, 1486 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1487 1488 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, 1489 lsop->lsrsp); 1490 if (ret) { 1491 dev_warn(lport->dev, 1492 "LLDD rejected LS RSP xmt: LS %d status %d\n", 1493 w0->ls_cmd, ret); 1494 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); 1495 return; 1496 } 1497 } 1498 1499 static struct nvme_fc_ctrl * 1500 nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, 1501 struct nvmefc_ls_rcv_op *lsop) 1502 { 1503 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1504 &lsop->rqstbuf->rq_dis_assoc; 1505 struct nvme_fc_ctrl *ctrl, *ret = NULL; 1506 struct nvmefc_ls_rcv_op *oldls = NULL; 1507 u64 association_id = be64_to_cpu(rqst->associd.association_id); 1508 unsigned long flags; 1509 1510 spin_lock_irqsave(&rport->lock, flags); 1511 1512 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 1513 if (!nvme_fc_ctrl_get(ctrl)) 1514 continue; 1515 spin_lock(&ctrl->lock); 1516 if (association_id == ctrl->association_id) { 1517 oldls = ctrl->rcv_disconn; 1518 ctrl->rcv_disconn = lsop; 1519 ret = ctrl; 1520 } 1521 spin_unlock(&ctrl->lock); 1522 if (ret) 1523 /* leave the ctrl get reference */ 1524 break; 1525 nvme_fc_ctrl_put(ctrl); 1526 } 1527 1528 spin_unlock_irqrestore(&rport->lock, flags); 1529 1530 /* transmit a response for anything that was pending */ 1531 if (oldls) { 1532 dev_info(rport->lport->dev, 1533 "NVME-FC{%d}: Multiple Disconnect Association " 1534 "LS's received\n", ctrl->cnum); 1535 /* overwrite good response with bogus failure */ 1536 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 1537 sizeof(*oldls->rspbuf), 1538 rqst->w0.ls_cmd, 1539 FCNVME_RJT_RC_UNAB, 1540 FCNVME_RJT_EXP_NONE, 0); 1541 nvme_fc_xmt_ls_rsp(oldls); 1542 } 1543 1544 return ret; 1545 } 1546 1547 /* 1548 * returns true to mean LS handled and ls_rsp can be sent 1549 * returns false to defer ls_rsp xmt (will be done as part of 1550 * association termination) 1551 */ 1552 static bool 1553 nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) 1554 { 1555 struct nvme_fc_rport *rport = lsop->rport; 1556 struct fcnvme_ls_disconnect_assoc_rqst *rqst = 1557 &lsop->rqstbuf->rq_dis_assoc; 1558 struct fcnvme_ls_disconnect_assoc_acc *acc = 1559 &lsop->rspbuf->rsp_dis_assoc; 1560 struct nvme_fc_ctrl *ctrl = NULL; 1561 int ret = 0; 1562 1563 memset(acc, 0, sizeof(*acc)); 1564 1565 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); 1566 if (!ret) { 1567 /* match an active association */ 1568 ctrl = nvme_fc_match_disconn_ls(rport, lsop); 1569 if (!ctrl) 1570 ret = VERR_NO_ASSOC; 1571 } 1572 1573 if (ret) { 1574 dev_info(rport->lport->dev, 1575 "Disconnect LS failed: %s\n", 1576 validation_errors[ret]); 1577 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, 1578 sizeof(*acc), rqst->w0.ls_cmd, 1579 (ret == VERR_NO_ASSOC) ? 1580 FCNVME_RJT_RC_INV_ASSOC : 1581 FCNVME_RJT_RC_LOGIC, 1582 FCNVME_RJT_EXP_NONE, 0); 1583 return true; 1584 } 1585 1586 /* format an ACCept response */ 1587 1588 lsop->lsrsp->rsplen = sizeof(*acc); 1589 1590 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 1591 fcnvme_lsdesc_len( 1592 sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 1593 FCNVME_LS_DISCONNECT_ASSOC); 1594 1595 /* 1596 * the transmit of the response will occur after the exchanges 1597 * for the association have been ABTS'd by 1598 * nvme_fc_delete_association(). 1599 */ 1600 1601 /* fail the association */ 1602 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); 1603 1604 /* release the reference taken by nvme_fc_match_disconn_ls() */ 1605 nvme_fc_ctrl_put(ctrl); 1606 1607 return false; 1608 } 1609 1610 /* 1611 * Actual Processing routine for received FC-NVME LS Requests from the LLD 1612 * returns true if a response should be sent afterward, false if rsp will 1613 * be sent asynchronously. 1614 */ 1615 static bool 1616 nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) 1617 { 1618 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 1619 bool ret = true; 1620 1621 lsop->lsrsp->nvme_fc_private = lsop; 1622 lsop->lsrsp->rspbuf = lsop->rspbuf; 1623 lsop->lsrsp->rspdma = lsop->rspdma; 1624 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; 1625 /* Be preventative. handlers will later set to valid length */ 1626 lsop->lsrsp->rsplen = 0; 1627 1628 /* 1629 * handlers: 1630 * parse request input, execute the request, and format the 1631 * LS response 1632 */ 1633 switch (w0->ls_cmd) { 1634 case FCNVME_LS_DISCONNECT_ASSOC: 1635 ret = nvme_fc_ls_disconnect_assoc(lsop); 1636 break; 1637 case FCNVME_LS_DISCONNECT_CONN: 1638 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1639 sizeof(*lsop->rspbuf), w0->ls_cmd, 1640 FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); 1641 break; 1642 case FCNVME_LS_CREATE_ASSOCIATION: 1643 case FCNVME_LS_CREATE_CONNECTION: 1644 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1645 sizeof(*lsop->rspbuf), w0->ls_cmd, 1646 FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); 1647 break; 1648 default: 1649 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 1650 sizeof(*lsop->rspbuf), w0->ls_cmd, 1651 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 1652 break; 1653 } 1654 1655 return(ret); 1656 } 1657 1658 static void 1659 nvme_fc_handle_ls_rqst_work(struct work_struct *work) 1660 { 1661 struct nvme_fc_rport *rport = 1662 container_of(work, struct nvme_fc_rport, lsrcv_work); 1663 struct fcnvme_ls_rqst_w0 *w0; 1664 struct nvmefc_ls_rcv_op *lsop; 1665 unsigned long flags; 1666 bool sendrsp; 1667 1668 restart: 1669 sendrsp = true; 1670 spin_lock_irqsave(&rport->lock, flags); 1671 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { 1672 if (lsop->handled) 1673 continue; 1674 1675 lsop->handled = true; 1676 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 1677 spin_unlock_irqrestore(&rport->lock, flags); 1678 sendrsp = nvme_fc_handle_ls_rqst(lsop); 1679 } else { 1680 spin_unlock_irqrestore(&rport->lock, flags); 1681 w0 = &lsop->rqstbuf->w0; 1682 lsop->lsrsp->rsplen = nvme_fc_format_rjt( 1683 lsop->rspbuf, 1684 sizeof(*lsop->rspbuf), 1685 w0->ls_cmd, 1686 FCNVME_RJT_RC_UNAB, 1687 FCNVME_RJT_EXP_NONE, 0); 1688 } 1689 if (sendrsp) 1690 nvme_fc_xmt_ls_rsp(lsop); 1691 goto restart; 1692 } 1693 spin_unlock_irqrestore(&rport->lock, flags); 1694 } 1695 1696 /** 1697 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD 1698 * upon the reception of a NVME LS request. 1699 * 1700 * The nvme-fc layer will copy payload to an internal structure for 1701 * processing. As such, upon completion of the routine, the LLDD may 1702 * immediately free/reuse the LS request buffer passed in the call. 1703 * 1704 * If this routine returns error, the LLDD should abort the exchange. 1705 * 1706 * @remoteport: pointer to the (registered) remote port that the LS 1707 * was received from. The remoteport is associated with 1708 * a specific localport. 1709 * @lsrsp: pointer to a nvmefc_ls_rsp response structure to be 1710 * used to reference the exchange corresponding to the LS 1711 * when issuing an ls response. 1712 * @lsreqbuf: pointer to the buffer containing the LS Request 1713 * @lsreqbuf_len: length, in bytes, of the received LS request 1714 */ 1715 int 1716 nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr, 1717 struct nvmefc_ls_rsp *lsrsp, 1718 void *lsreqbuf, u32 lsreqbuf_len) 1719 { 1720 struct nvme_fc_rport *rport = remoteport_to_rport(portptr); 1721 struct nvme_fc_lport *lport = rport->lport; 1722 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; 1723 struct nvmefc_ls_rcv_op *lsop; 1724 unsigned long flags; 1725 int ret; 1726 1727 nvme_fc_rport_get(rport); 1728 1729 /* validate there's a routine to transmit a response */ 1730 if (!lport->ops->xmt_ls_rsp) { 1731 dev_info(lport->dev, 1732 "RCV %s LS failed: no LLDD xmt_ls_rsp\n", 1733 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1734 nvmefc_ls_names[w0->ls_cmd] : ""); 1735 ret = -EINVAL; 1736 goto out_put; 1737 } 1738 1739 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { 1740 dev_info(lport->dev, 1741 "RCV %s LS failed: payload too large\n", 1742 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1743 nvmefc_ls_names[w0->ls_cmd] : ""); 1744 ret = -E2BIG; 1745 goto out_put; 1746 } 1747 1748 lsop = kzalloc(sizeof(*lsop) + 1749 sizeof(union nvmefc_ls_requests) + 1750 sizeof(union nvmefc_ls_responses), 1751 GFP_KERNEL); 1752 if (!lsop) { 1753 dev_info(lport->dev, 1754 "RCV %s LS failed: No memory\n", 1755 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1756 nvmefc_ls_names[w0->ls_cmd] : ""); 1757 ret = -ENOMEM; 1758 goto out_put; 1759 } 1760 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; 1761 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; 1762 1763 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, 1764 sizeof(*lsop->rspbuf), 1765 DMA_TO_DEVICE); 1766 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { 1767 dev_info(lport->dev, 1768 "RCV %s LS failed: DMA mapping failure\n", 1769 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 1770 nvmefc_ls_names[w0->ls_cmd] : ""); 1771 ret = -EFAULT; 1772 goto out_free; 1773 } 1774 1775 lsop->rport = rport; 1776 lsop->lsrsp = lsrsp; 1777 1778 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); 1779 lsop->rqstdatalen = lsreqbuf_len; 1780 1781 spin_lock_irqsave(&rport->lock, flags); 1782 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { 1783 spin_unlock_irqrestore(&rport->lock, flags); 1784 ret = -ENOTCONN; 1785 goto out_unmap; 1786 } 1787 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); 1788 spin_unlock_irqrestore(&rport->lock, flags); 1789 1790 schedule_work(&rport->lsrcv_work); 1791 1792 return 0; 1793 1794 out_unmap: 1795 fc_dma_unmap_single(lport->dev, lsop->rspdma, 1796 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 1797 out_free: 1798 kfree(lsop); 1799 out_put: 1800 nvme_fc_rport_put(rport); 1801 return ret; 1802 } 1803 EXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); 1804 1805 1806 /* *********************** NVME Ctrl Routines **************************** */ 1807 1808 static void 1809 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, 1810 struct nvme_fc_fcp_op *op) 1811 { 1812 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, 1813 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1814 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, 1815 sizeof(op->cmd_iu), DMA_TO_DEVICE); 1816 1817 atomic_set(&op->state, FCPOP_STATE_UNINIT); 1818 } 1819 1820 static void 1821 nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, 1822 unsigned int hctx_idx) 1823 { 1824 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 1825 1826 return __nvme_fc_exit_request(set->driver_data, op); 1827 } 1828 1829 static int 1830 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) 1831 { 1832 unsigned long flags; 1833 int opstate; 1834 1835 spin_lock_irqsave(&ctrl->lock, flags); 1836 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); 1837 if (opstate != FCPOP_STATE_ACTIVE) 1838 atomic_set(&op->state, opstate); 1839 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 1840 ctrl->iocnt++; 1841 spin_unlock_irqrestore(&ctrl->lock, flags); 1842 1843 if (opstate != FCPOP_STATE_ACTIVE) 1844 return -ECANCELED; 1845 1846 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, 1847 &ctrl->rport->remoteport, 1848 op->queue->lldd_handle, 1849 &op->fcp_req); 1850 1851 return 0; 1852 } 1853 1854 static void 1855 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) 1856 { 1857 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 1858 int i; 1859 1860 /* ensure we've initialized the ops once */ 1861 if (!(aen_op->flags & FCOP_FLAGS_AEN)) 1862 return; 1863 1864 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 1865 __nvme_fc_abort_op(ctrl, aen_op); 1866 } 1867 1868 static inline void 1869 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, 1870 struct nvme_fc_fcp_op *op, int opstate) 1871 { 1872 unsigned long flags; 1873 1874 if (opstate == FCPOP_STATE_ABORTED) { 1875 spin_lock_irqsave(&ctrl->lock, flags); 1876 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { 1877 if (!--ctrl->iocnt) 1878 wake_up(&ctrl->ioabort_wait); 1879 } 1880 spin_unlock_irqrestore(&ctrl->lock, flags); 1881 } 1882 } 1883 1884 static void 1885 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 1886 { 1887 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 1888 struct request *rq = op->rq; 1889 struct nvmefc_fcp_req *freq = &op->fcp_req; 1890 struct nvme_fc_ctrl *ctrl = op->ctrl; 1891 struct nvme_fc_queue *queue = op->queue; 1892 struct nvme_completion *cqe = &op->rsp_iu.cqe; 1893 struct nvme_command *sqe = &op->cmd_iu.sqe; 1894 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1895 union nvme_result result; 1896 bool terminate_assoc = true; 1897 int opstate; 1898 1899 /* 1900 * WARNING: 1901 * The current linux implementation of a nvme controller 1902 * allocates a single tag set for all io queues and sizes 1903 * the io queues to fully hold all possible tags. Thus, the 1904 * implementation does not reference or care about the sqhd 1905 * value as it never needs to use the sqhd/sqtail pointers 1906 * for submission pacing. 1907 * 1908 * This affects the FC-NVME implementation in two ways: 1909 * 1) As the value doesn't matter, we don't need to waste 1910 * cycles extracting it from ERSPs and stamping it in the 1911 * cases where the transport fabricates CQEs on successful 1912 * completions. 1913 * 2) The FC-NVME implementation requires that delivery of 1914 * ERSP completions are to go back to the nvme layer in order 1915 * relative to the rsn, such that the sqhd value will always 1916 * be "in order" for the nvme layer. As the nvme layer in 1917 * linux doesn't care about sqhd, there's no need to return 1918 * them in order. 1919 * 1920 * Additionally: 1921 * As the core nvme layer in linux currently does not look at 1922 * every field in the cqe - in cases where the FC transport must 1923 * fabricate a CQE, the following fields will not be set as they 1924 * are not referenced: 1925 * cqe.sqid, cqe.sqhd, cqe.command_id 1926 * 1927 * Failure or error of an individual i/o, in a transport 1928 * detected fashion unrelated to the nvme completion status, 1929 * potentially cause the initiator and target sides to get out 1930 * of sync on SQ head/tail (aka outstanding io count allowed). 1931 * Per FC-NVME spec, failure of an individual command requires 1932 * the connection to be terminated, which in turn requires the 1933 * association to be terminated. 1934 */ 1935 1936 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 1937 1938 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, 1939 sizeof(op->rsp_iu), DMA_FROM_DEVICE); 1940 1941 if (opstate == FCPOP_STATE_ABORTED) 1942 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1943 else if (freq->status) { 1944 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1945 dev_info(ctrl->ctrl.device, 1946 "NVME-FC{%d}: io failed due to lldd error %d\n", 1947 ctrl->cnum, freq->status); 1948 } 1949 1950 /* 1951 * For the linux implementation, if we have an unsuccesful 1952 * status, they blk-mq layer can typically be called with the 1953 * non-zero status and the content of the cqe isn't important. 1954 */ 1955 if (status) 1956 goto done; 1957 1958 /* 1959 * command completed successfully relative to the wire 1960 * protocol. However, validate anything received and 1961 * extract the status and result from the cqe (create it 1962 * where necessary). 1963 */ 1964 1965 switch (freq->rcv_rsplen) { 1966 1967 case 0: 1968 case NVME_FC_SIZEOF_ZEROS_RSP: 1969 /* 1970 * No response payload or 12 bytes of payload (which 1971 * should all be zeros) are considered successful and 1972 * no payload in the CQE by the transport. 1973 */ 1974 if (freq->transferred_length != 1975 be32_to_cpu(op->cmd_iu.data_len)) { 1976 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1977 dev_info(ctrl->ctrl.device, 1978 "NVME-FC{%d}: io failed due to bad transfer " 1979 "length: %d vs expected %d\n", 1980 ctrl->cnum, freq->transferred_length, 1981 be32_to_cpu(op->cmd_iu.data_len)); 1982 goto done; 1983 } 1984 result.u64 = 0; 1985 break; 1986 1987 case sizeof(struct nvme_fc_ersp_iu): 1988 /* 1989 * The ERSP IU contains a full completion with CQE. 1990 * Validate ERSP IU and look at cqe. 1991 */ 1992 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != 1993 (freq->rcv_rsplen / 4) || 1994 be32_to_cpu(op->rsp_iu.xfrd_len) != 1995 freq->transferred_length || 1996 op->rsp_iu.ersp_result || 1997 sqe->common.command_id != cqe->command_id)) { 1998 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 1999 dev_info(ctrl->ctrl.device, 2000 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " 2001 "iu len %d, xfr len %d vs %d, status code " 2002 "%d, cmdid %d vs %d\n", 2003 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), 2004 be32_to_cpu(op->rsp_iu.xfrd_len), 2005 freq->transferred_length, 2006 op->rsp_iu.ersp_result, 2007 sqe->common.command_id, 2008 cqe->command_id); 2009 goto done; 2010 } 2011 result = cqe->result; 2012 status = cqe->status; 2013 break; 2014 2015 default: 2016 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 2017 dev_info(ctrl->ctrl.device, 2018 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " 2019 "len %d\n", 2020 ctrl->cnum, freq->rcv_rsplen); 2021 goto done; 2022 } 2023 2024 terminate_assoc = false; 2025 2026 done: 2027 if (op->flags & FCOP_FLAGS_AEN) { 2028 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 2029 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2030 atomic_set(&op->state, FCPOP_STATE_IDLE); 2031 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 2032 nvme_fc_ctrl_put(ctrl); 2033 goto check_error; 2034 } 2035 2036 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2037 if (!nvme_end_request(rq, status, result)) 2038 nvme_fc_complete_rq(rq); 2039 2040 check_error: 2041 if (terminate_assoc) 2042 nvme_fc_error_recovery(ctrl, "transport detected io error"); 2043 } 2044 2045 static int 2046 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, 2047 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, 2048 struct request *rq, u32 rqno) 2049 { 2050 struct nvme_fcp_op_w_sgl *op_w_sgl = 2051 container_of(op, typeof(*op_w_sgl), op); 2052 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2053 int ret = 0; 2054 2055 memset(op, 0, sizeof(*op)); 2056 op->fcp_req.cmdaddr = &op->cmd_iu; 2057 op->fcp_req.cmdlen = sizeof(op->cmd_iu); 2058 op->fcp_req.rspaddr = &op->rsp_iu; 2059 op->fcp_req.rsplen = sizeof(op->rsp_iu); 2060 op->fcp_req.done = nvme_fc_fcpio_done; 2061 op->ctrl = ctrl; 2062 op->queue = queue; 2063 op->rq = rq; 2064 op->rqno = rqno; 2065 2066 cmdiu->format_id = NVME_CMD_FORMAT_ID; 2067 cmdiu->fc_id = NVME_CMD_FC_ID; 2068 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); 2069 if (queue->qnum) 2070 cmdiu->rsv_cat = fccmnd_set_cat_css(0, 2071 (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); 2072 else 2073 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); 2074 2075 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, 2076 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); 2077 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { 2078 dev_err(ctrl->dev, 2079 "FCP Op failed - cmdiu dma mapping failed.\n"); 2080 ret = EFAULT; 2081 goto out_on_error; 2082 } 2083 2084 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, 2085 &op->rsp_iu, sizeof(op->rsp_iu), 2086 DMA_FROM_DEVICE); 2087 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { 2088 dev_err(ctrl->dev, 2089 "FCP Op failed - rspiu dma mapping failed.\n"); 2090 ret = EFAULT; 2091 } 2092 2093 atomic_set(&op->state, FCPOP_STATE_IDLE); 2094 out_on_error: 2095 return ret; 2096 } 2097 2098 static int 2099 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 2100 unsigned int hctx_idx, unsigned int numa_node) 2101 { 2102 struct nvme_fc_ctrl *ctrl = set->driver_data; 2103 struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); 2104 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 2105 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 2106 int res; 2107 2108 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 2109 if (res) 2110 return res; 2111 op->op.fcp_req.first_sgl = op->sgl; 2112 op->op.fcp_req.private = &op->priv[0]; 2113 nvme_req(rq)->ctrl = &ctrl->ctrl; 2114 return res; 2115 } 2116 2117 static int 2118 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) 2119 { 2120 struct nvme_fc_fcp_op *aen_op; 2121 struct nvme_fc_cmd_iu *cmdiu; 2122 struct nvme_command *sqe; 2123 void *private = NULL; 2124 int i, ret; 2125 2126 aen_op = ctrl->aen_ops; 2127 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2128 if (ctrl->lport->ops->fcprqst_priv_sz) { 2129 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, 2130 GFP_KERNEL); 2131 if (!private) 2132 return -ENOMEM; 2133 } 2134 2135 cmdiu = &aen_op->cmd_iu; 2136 sqe = &cmdiu->sqe; 2137 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], 2138 aen_op, (struct request *)NULL, 2139 (NVME_AQ_BLK_MQ_DEPTH + i)); 2140 if (ret) { 2141 kfree(private); 2142 return ret; 2143 } 2144 2145 aen_op->flags = FCOP_FLAGS_AEN; 2146 aen_op->fcp_req.private = private; 2147 2148 memset(sqe, 0, sizeof(*sqe)); 2149 sqe->common.opcode = nvme_admin_async_event; 2150 /* Note: core layer may overwrite the sqe.command_id value */ 2151 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; 2152 } 2153 return 0; 2154 } 2155 2156 static void 2157 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) 2158 { 2159 struct nvme_fc_fcp_op *aen_op; 2160 int i; 2161 2162 aen_op = ctrl->aen_ops; 2163 for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 2164 __nvme_fc_exit_request(ctrl, aen_op); 2165 2166 kfree(aen_op->fcp_req.private); 2167 aen_op->fcp_req.private = NULL; 2168 } 2169 } 2170 2171 static inline void 2172 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, 2173 unsigned int qidx) 2174 { 2175 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; 2176 2177 hctx->driver_data = queue; 2178 queue->hctx = hctx; 2179 } 2180 2181 static int 2182 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2183 unsigned int hctx_idx) 2184 { 2185 struct nvme_fc_ctrl *ctrl = data; 2186 2187 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); 2188 2189 return 0; 2190 } 2191 2192 static int 2193 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 2194 unsigned int hctx_idx) 2195 { 2196 struct nvme_fc_ctrl *ctrl = data; 2197 2198 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); 2199 2200 return 0; 2201 } 2202 2203 static void 2204 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) 2205 { 2206 struct nvme_fc_queue *queue; 2207 2208 queue = &ctrl->queues[idx]; 2209 memset(queue, 0, sizeof(*queue)); 2210 queue->ctrl = ctrl; 2211 queue->qnum = idx; 2212 atomic_set(&queue->csn, 0); 2213 queue->dev = ctrl->dev; 2214 2215 if (idx > 0) 2216 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 2217 else 2218 queue->cmnd_capsule_len = sizeof(struct nvme_command); 2219 2220 /* 2221 * Considered whether we should allocate buffers for all SQEs 2222 * and CQEs and dma map them - mapping their respective entries 2223 * into the request structures (kernel vm addr and dma address) 2224 * thus the driver could use the buffers/mappings directly. 2225 * It only makes sense if the LLDD would use them for its 2226 * messaging api. It's very unlikely most adapter api's would use 2227 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload 2228 * structures were used instead. 2229 */ 2230 } 2231 2232 /* 2233 * This routine terminates a queue at the transport level. 2234 * The transport has already ensured that all outstanding ios on 2235 * the queue have been terminated. 2236 * The transport will send a Disconnect LS request to terminate 2237 * the queue's connection. Termination of the admin queue will also 2238 * terminate the association at the target. 2239 */ 2240 static void 2241 nvme_fc_free_queue(struct nvme_fc_queue *queue) 2242 { 2243 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) 2244 return; 2245 2246 clear_bit(NVME_FC_Q_LIVE, &queue->flags); 2247 /* 2248 * Current implementation never disconnects a single queue. 2249 * It always terminates a whole association. So there is never 2250 * a disconnect(queue) LS sent to the target. 2251 */ 2252 2253 queue->connection_id = 0; 2254 atomic_set(&queue->csn, 0); 2255 } 2256 2257 static void 2258 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, 2259 struct nvme_fc_queue *queue, unsigned int qidx) 2260 { 2261 if (ctrl->lport->ops->delete_queue) 2262 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, 2263 queue->lldd_handle); 2264 queue->lldd_handle = NULL; 2265 } 2266 2267 static void 2268 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) 2269 { 2270 int i; 2271 2272 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2273 nvme_fc_free_queue(&ctrl->queues[i]); 2274 } 2275 2276 static int 2277 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, 2278 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) 2279 { 2280 int ret = 0; 2281 2282 queue->lldd_handle = NULL; 2283 if (ctrl->lport->ops->create_queue) 2284 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, 2285 qidx, qsize, &queue->lldd_handle); 2286 2287 return ret; 2288 } 2289 2290 static void 2291 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) 2292 { 2293 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; 2294 int i; 2295 2296 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) 2297 __nvme_fc_delete_hw_queue(ctrl, queue, i); 2298 } 2299 2300 static int 2301 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2302 { 2303 struct nvme_fc_queue *queue = &ctrl->queues[1]; 2304 int i, ret; 2305 2306 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { 2307 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 2308 if (ret) 2309 goto delete_queues; 2310 } 2311 2312 return 0; 2313 2314 delete_queues: 2315 for (; i >= 0; i--) 2316 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); 2317 return ret; 2318 } 2319 2320 static int 2321 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 2322 { 2323 int i, ret = 0; 2324 2325 for (i = 1; i < ctrl->ctrl.queue_count; i++) { 2326 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, 2327 (qsize / 5)); 2328 if (ret) 2329 break; 2330 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); 2331 if (ret) 2332 break; 2333 2334 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); 2335 } 2336 2337 return ret; 2338 } 2339 2340 static void 2341 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) 2342 { 2343 int i; 2344 2345 for (i = 1; i < ctrl->ctrl.queue_count; i++) 2346 nvme_fc_init_queue(ctrl, i); 2347 } 2348 2349 static void 2350 nvme_fc_ctrl_free(struct kref *ref) 2351 { 2352 struct nvme_fc_ctrl *ctrl = 2353 container_of(ref, struct nvme_fc_ctrl, ref); 2354 unsigned long flags; 2355 2356 if (ctrl->ctrl.tagset) { 2357 blk_cleanup_queue(ctrl->ctrl.connect_q); 2358 blk_mq_free_tag_set(&ctrl->tag_set); 2359 } 2360 2361 /* remove from rport list */ 2362 spin_lock_irqsave(&ctrl->rport->lock, flags); 2363 list_del(&ctrl->ctrl_list); 2364 spin_unlock_irqrestore(&ctrl->rport->lock, flags); 2365 2366 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 2367 blk_cleanup_queue(ctrl->ctrl.admin_q); 2368 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 2369 blk_mq_free_tag_set(&ctrl->admin_tag_set); 2370 2371 kfree(ctrl->queues); 2372 2373 put_device(ctrl->dev); 2374 nvme_fc_rport_put(ctrl->rport); 2375 2376 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 2377 if (ctrl->ctrl.opts) 2378 nvmf_free_options(ctrl->ctrl.opts); 2379 kfree(ctrl); 2380 } 2381 2382 static void 2383 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) 2384 { 2385 kref_put(&ctrl->ref, nvme_fc_ctrl_free); 2386 } 2387 2388 static int 2389 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) 2390 { 2391 return kref_get_unless_zero(&ctrl->ref); 2392 } 2393 2394 /* 2395 * All accesses from nvme core layer done - can now free the 2396 * controller. Called after last nvme_put_ctrl() call 2397 */ 2398 static void 2399 nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) 2400 { 2401 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2402 2403 WARN_ON(nctrl != &ctrl->ctrl); 2404 2405 nvme_fc_ctrl_put(ctrl); 2406 } 2407 2408 static void 2409 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 2410 { 2411 int active; 2412 2413 /* 2414 * if an error (io timeout, etc) while (re)connecting, 2415 * it's an error on creating the new association. 2416 * Start the error recovery thread if it hasn't already 2417 * been started. It is expected there could be multiple 2418 * ios hitting this path before things are cleaned up. 2419 */ 2420 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 2421 active = atomic_xchg(&ctrl->err_work_active, 1); 2422 if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) { 2423 atomic_set(&ctrl->err_work_active, 0); 2424 WARN_ON(1); 2425 } 2426 return; 2427 } 2428 2429 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 2430 if (ctrl->ctrl.state != NVME_CTRL_LIVE) 2431 return; 2432 2433 dev_warn(ctrl->ctrl.device, 2434 "NVME-FC{%d}: transport association error detected: %s\n", 2435 ctrl->cnum, errmsg); 2436 dev_warn(ctrl->ctrl.device, 2437 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 2438 2439 nvme_reset_ctrl(&ctrl->ctrl); 2440 } 2441 2442 static enum blk_eh_timer_return 2443 nvme_fc_timeout(struct request *rq, bool reserved) 2444 { 2445 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2446 struct nvme_fc_ctrl *ctrl = op->ctrl; 2447 2448 /* 2449 * we can't individually ABTS an io without affecting the queue, 2450 * thus killing the queue, and thus the association. 2451 * So resolve by performing a controller reset, which will stop 2452 * the host/io stack, terminate the association on the link, 2453 * and recreate an association on the link. 2454 */ 2455 nvme_fc_error_recovery(ctrl, "io timeout error"); 2456 2457 /* 2458 * the io abort has been initiated. Have the reset timer 2459 * restarted and the abort completion will complete the io 2460 * shortly. Avoids a synchronous wait while the abort finishes. 2461 */ 2462 return BLK_EH_RESET_TIMER; 2463 } 2464 2465 static int 2466 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2467 struct nvme_fc_fcp_op *op) 2468 { 2469 struct nvmefc_fcp_req *freq = &op->fcp_req; 2470 int ret; 2471 2472 freq->sg_cnt = 0; 2473 2474 if (!blk_rq_nr_phys_segments(rq)) 2475 return 0; 2476 2477 freq->sg_table.sgl = freq->first_sgl; 2478 ret = sg_alloc_table_chained(&freq->sg_table, 2479 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, 2480 NVME_INLINE_SG_CNT); 2481 if (ret) 2482 return -ENOMEM; 2483 2484 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 2485 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 2486 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 2487 op->nents, rq_dma_dir(rq)); 2488 if (unlikely(freq->sg_cnt <= 0)) { 2489 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2490 freq->sg_cnt = 0; 2491 return -EFAULT; 2492 } 2493 2494 /* 2495 * TODO: blk_integrity_rq(rq) for DIF 2496 */ 2497 return 0; 2498 } 2499 2500 static void 2501 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 2502 struct nvme_fc_fcp_op *op) 2503 { 2504 struct nvmefc_fcp_req *freq = &op->fcp_req; 2505 2506 if (!freq->sg_cnt) 2507 return; 2508 2509 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, 2510 rq_dma_dir(rq)); 2511 2512 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 2513 2514 freq->sg_cnt = 0; 2515 } 2516 2517 /* 2518 * In FC, the queue is a logical thing. At transport connect, the target 2519 * creates its "queue" and returns a handle that is to be given to the 2520 * target whenever it posts something to the corresponding SQ. When an 2521 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the 2522 * command contained within the SQE, an io, and assigns a FC exchange 2523 * to it. The SQE and the associated SQ handle are sent in the initial 2524 * CMD IU sents on the exchange. All transfers relative to the io occur 2525 * as part of the exchange. The CQE is the last thing for the io, 2526 * which is transferred (explicitly or implicitly) with the RSP IU 2527 * sent on the exchange. After the CQE is received, the FC exchange is 2528 * terminaed and the Exchange may be used on a different io. 2529 * 2530 * The transport to LLDD api has the transport making a request for a 2531 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange 2532 * resource and transfers the command. The LLDD will then process all 2533 * steps to complete the io. Upon completion, the transport done routine 2534 * is called. 2535 * 2536 * So - while the operation is outstanding to the LLDD, there is a link 2537 * level FC exchange resource that is also outstanding. This must be 2538 * considered in all cleanup operations. 2539 */ 2540 static blk_status_t 2541 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 2542 struct nvme_fc_fcp_op *op, u32 data_len, 2543 enum nvmefc_fcp_datadir io_dir) 2544 { 2545 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2546 struct nvme_command *sqe = &cmdiu->sqe; 2547 int ret, opstate; 2548 2549 /* 2550 * before attempting to send the io, check to see if we believe 2551 * the target device is present 2552 */ 2553 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 2554 return BLK_STS_RESOURCE; 2555 2556 if (!nvme_fc_ctrl_get(ctrl)) 2557 return BLK_STS_IOERR; 2558 2559 /* format the FC-NVME CMD IU and fcp_req */ 2560 cmdiu->connection_id = cpu_to_be64(queue->connection_id); 2561 cmdiu->data_len = cpu_to_be32(data_len); 2562 switch (io_dir) { 2563 case NVMEFC_FCP_WRITE: 2564 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; 2565 break; 2566 case NVMEFC_FCP_READ: 2567 cmdiu->flags = FCNVME_CMD_FLAGS_READ; 2568 break; 2569 case NVMEFC_FCP_NODATA: 2570 cmdiu->flags = 0; 2571 break; 2572 } 2573 op->fcp_req.payload_length = data_len; 2574 op->fcp_req.io_dir = io_dir; 2575 op->fcp_req.transferred_length = 0; 2576 op->fcp_req.rcv_rsplen = 0; 2577 op->fcp_req.status = NVME_SC_SUCCESS; 2578 op->fcp_req.sqid = cpu_to_le16(queue->qnum); 2579 2580 /* 2581 * validate per fabric rules, set fields mandated by fabric spec 2582 * as well as those by FC-NVME spec. 2583 */ 2584 WARN_ON_ONCE(sqe->common.metadata); 2585 sqe->common.flags |= NVME_CMD_SGL_METABUF; 2586 2587 /* 2588 * format SQE DPTR field per FC-NVME rules: 2589 * type=0x5 Transport SGL Data Block Descriptor 2590 * subtype=0xA Transport-specific value 2591 * address=0 2592 * length=length of the data series 2593 */ 2594 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 2595 NVME_SGL_FMT_TRANSPORT_A; 2596 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2597 sqe->rw.dptr.sgl.addr = 0; 2598 2599 if (!(op->flags & FCOP_FLAGS_AEN)) { 2600 ret = nvme_fc_map_data(ctrl, op->rq, op); 2601 if (ret < 0) { 2602 nvme_cleanup_cmd(op->rq); 2603 nvme_fc_ctrl_put(ctrl); 2604 if (ret == -ENOMEM || ret == -EAGAIN) 2605 return BLK_STS_RESOURCE; 2606 return BLK_STS_IOERR; 2607 } 2608 } 2609 2610 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, 2611 sizeof(op->cmd_iu), DMA_TO_DEVICE); 2612 2613 atomic_set(&op->state, FCPOP_STATE_ACTIVE); 2614 2615 if (!(op->flags & FCOP_FLAGS_AEN)) 2616 blk_mq_start_request(op->rq); 2617 2618 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); 2619 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, 2620 &ctrl->rport->remoteport, 2621 queue->lldd_handle, &op->fcp_req); 2622 2623 if (ret) { 2624 /* 2625 * If the lld fails to send the command is there an issue with 2626 * the csn value? If the command that fails is the Connect, 2627 * no - as the connection won't be live. If it is a command 2628 * post-connect, it's possible a gap in csn may be created. 2629 * Does this matter? As Linux initiators don't send fused 2630 * commands, no. The gap would exist, but as there's nothing 2631 * that depends on csn order to be delivered on the target 2632 * side, it shouldn't hurt. It would be difficult for a 2633 * target to even detect the csn gap as it has no idea when the 2634 * cmd with the csn was supposed to arrive. 2635 */ 2636 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 2637 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 2638 2639 if (!(op->flags & FCOP_FLAGS_AEN)) { 2640 nvme_fc_unmap_data(ctrl, op->rq, op); 2641 nvme_cleanup_cmd(op->rq); 2642 } 2643 2644 nvme_fc_ctrl_put(ctrl); 2645 2646 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && 2647 ret != -EBUSY) 2648 return BLK_STS_IOERR; 2649 2650 return BLK_STS_RESOURCE; 2651 } 2652 2653 return BLK_STS_OK; 2654 } 2655 2656 static blk_status_t 2657 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, 2658 const struct blk_mq_queue_data *bd) 2659 { 2660 struct nvme_ns *ns = hctx->queue->queuedata; 2661 struct nvme_fc_queue *queue = hctx->driver_data; 2662 struct nvme_fc_ctrl *ctrl = queue->ctrl; 2663 struct request *rq = bd->rq; 2664 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2665 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 2666 struct nvme_command *sqe = &cmdiu->sqe; 2667 enum nvmefc_fcp_datadir io_dir; 2668 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); 2669 u32 data_len; 2670 blk_status_t ret; 2671 2672 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 2673 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 2674 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 2675 2676 ret = nvme_setup_cmd(ns, rq, sqe); 2677 if (ret) 2678 return ret; 2679 2680 /* 2681 * nvme core doesn't quite treat the rq opaquely. Commands such 2682 * as WRITE ZEROES will return a non-zero rq payload_bytes yet 2683 * there is no actual payload to be transferred. 2684 * To get it right, key data transmission on there being 1 or 2685 * more physical segments in the sg list. If there is no 2686 * physical segments, there is no payload. 2687 */ 2688 if (blk_rq_nr_phys_segments(rq)) { 2689 data_len = blk_rq_payload_bytes(rq); 2690 io_dir = ((rq_data_dir(rq) == WRITE) ? 2691 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 2692 } else { 2693 data_len = 0; 2694 io_dir = NVMEFC_FCP_NODATA; 2695 } 2696 2697 2698 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 2699 } 2700 2701 static void 2702 nvme_fc_submit_async_event(struct nvme_ctrl *arg) 2703 { 2704 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); 2705 struct nvme_fc_fcp_op *aen_op; 2706 blk_status_t ret; 2707 2708 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 2709 return; 2710 2711 aen_op = &ctrl->aen_ops[0]; 2712 2713 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, 2714 NVMEFC_FCP_NODATA); 2715 if (ret) 2716 dev_err(ctrl->ctrl.device, 2717 "failed async event work\n"); 2718 } 2719 2720 static void 2721 nvme_fc_complete_rq(struct request *rq) 2722 { 2723 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 2724 struct nvme_fc_ctrl *ctrl = op->ctrl; 2725 2726 atomic_set(&op->state, FCPOP_STATE_IDLE); 2727 2728 nvme_fc_unmap_data(ctrl, rq, op); 2729 nvme_complete_rq(rq); 2730 nvme_fc_ctrl_put(ctrl); 2731 } 2732 2733 /* 2734 * This routine is used by the transport when it needs to find active 2735 * io on a queue that is to be terminated. The transport uses 2736 * blk_mq_tagset_busy_itr() to find the busy requests, which then invoke 2737 * this routine to kill them on a 1 by 1 basis. 2738 * 2739 * As FC allocates FC exchange for each io, the transport must contact 2740 * the LLDD to terminate the exchange, thus releasing the FC exchange. 2741 * After terminating the exchange the LLDD will call the transport's 2742 * normal io done path for the request, but it will have an aborted 2743 * status. The done path will return the io request back to the block 2744 * layer with an error status. 2745 */ 2746 static bool 2747 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved) 2748 { 2749 struct nvme_ctrl *nctrl = data; 2750 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 2751 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req); 2752 2753 __nvme_fc_abort_op(ctrl, op); 2754 return true; 2755 } 2756 2757 2758 static const struct blk_mq_ops nvme_fc_mq_ops = { 2759 .queue_rq = nvme_fc_queue_rq, 2760 .complete = nvme_fc_complete_rq, 2761 .init_request = nvme_fc_init_request, 2762 .exit_request = nvme_fc_exit_request, 2763 .init_hctx = nvme_fc_init_hctx, 2764 .timeout = nvme_fc_timeout, 2765 }; 2766 2767 static int 2768 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) 2769 { 2770 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2771 unsigned int nr_io_queues; 2772 int ret; 2773 2774 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2775 ctrl->lport->ops->max_hw_queues); 2776 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2777 if (ret) { 2778 dev_info(ctrl->ctrl.device, 2779 "set_queue_count failed: %d\n", ret); 2780 return ret; 2781 } 2782 2783 ctrl->ctrl.queue_count = nr_io_queues + 1; 2784 if (!nr_io_queues) 2785 return 0; 2786 2787 nvme_fc_init_io_queues(ctrl); 2788 2789 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 2790 ctrl->tag_set.ops = &nvme_fc_mq_ops; 2791 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 2792 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 2793 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 2794 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 2795 ctrl->tag_set.cmd_size = 2796 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 2797 ctrl->lport->ops->fcprqst_priv_sz); 2798 ctrl->tag_set.driver_data = ctrl; 2799 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; 2800 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; 2801 2802 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); 2803 if (ret) 2804 return ret; 2805 2806 ctrl->ctrl.tagset = &ctrl->tag_set; 2807 2808 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 2809 if (IS_ERR(ctrl->ctrl.connect_q)) { 2810 ret = PTR_ERR(ctrl->ctrl.connect_q); 2811 goto out_free_tag_set; 2812 } 2813 2814 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2815 if (ret) 2816 goto out_cleanup_blk_queue; 2817 2818 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2819 if (ret) 2820 goto out_delete_hw_queues; 2821 2822 ctrl->ioq_live = true; 2823 2824 return 0; 2825 2826 out_delete_hw_queues: 2827 nvme_fc_delete_hw_io_queues(ctrl); 2828 out_cleanup_blk_queue: 2829 blk_cleanup_queue(ctrl->ctrl.connect_q); 2830 out_free_tag_set: 2831 blk_mq_free_tag_set(&ctrl->tag_set); 2832 nvme_fc_free_io_queues(ctrl); 2833 2834 /* force put free routine to ignore io queues */ 2835 ctrl->ctrl.tagset = NULL; 2836 2837 return ret; 2838 } 2839 2840 static int 2841 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 2842 { 2843 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2844 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; 2845 unsigned int nr_io_queues; 2846 int ret; 2847 2848 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 2849 ctrl->lport->ops->max_hw_queues); 2850 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 2851 if (ret) { 2852 dev_info(ctrl->ctrl.device, 2853 "set_queue_count failed: %d\n", ret); 2854 return ret; 2855 } 2856 2857 if (!nr_io_queues && prior_ioq_cnt) { 2858 dev_info(ctrl->ctrl.device, 2859 "Fail Reconnect: At least 1 io queue " 2860 "required (was %d)\n", prior_ioq_cnt); 2861 return -ENOSPC; 2862 } 2863 2864 ctrl->ctrl.queue_count = nr_io_queues + 1; 2865 /* check for io queues existing */ 2866 if (ctrl->ctrl.queue_count == 1) 2867 return 0; 2868 2869 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2870 if (ret) 2871 goto out_free_io_queues; 2872 2873 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2874 if (ret) 2875 goto out_delete_hw_queues; 2876 2877 if (prior_ioq_cnt != nr_io_queues) 2878 dev_info(ctrl->ctrl.device, 2879 "reconnect: revising io queue count from %d to %d\n", 2880 prior_ioq_cnt, nr_io_queues); 2881 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2882 2883 return 0; 2884 2885 out_delete_hw_queues: 2886 nvme_fc_delete_hw_io_queues(ctrl); 2887 out_free_io_queues: 2888 nvme_fc_free_io_queues(ctrl); 2889 return ret; 2890 } 2891 2892 static void 2893 nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 2894 { 2895 struct nvme_fc_lport *lport = rport->lport; 2896 2897 atomic_inc(&lport->act_rport_cnt); 2898 } 2899 2900 static void 2901 nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 2902 { 2903 struct nvme_fc_lport *lport = rport->lport; 2904 u32 cnt; 2905 2906 cnt = atomic_dec_return(&lport->act_rport_cnt); 2907 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 2908 lport->ops->localport_delete(&lport->localport); 2909 } 2910 2911 static int 2912 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 2913 { 2914 struct nvme_fc_rport *rport = ctrl->rport; 2915 u32 cnt; 2916 2917 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) 2918 return 1; 2919 2920 cnt = atomic_inc_return(&rport->act_ctrl_cnt); 2921 if (cnt == 1) 2922 nvme_fc_rport_active_on_lport(rport); 2923 2924 return 0; 2925 } 2926 2927 static int 2928 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 2929 { 2930 struct nvme_fc_rport *rport = ctrl->rport; 2931 struct nvme_fc_lport *lport = rport->lport; 2932 u32 cnt; 2933 2934 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ 2935 2936 cnt = atomic_dec_return(&rport->act_ctrl_cnt); 2937 if (cnt == 0) { 2938 if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 2939 lport->ops->remoteport_delete(&rport->remoteport); 2940 nvme_fc_rport_inactive_on_lport(rport); 2941 } 2942 2943 return 0; 2944 } 2945 2946 /* 2947 * This routine restarts the controller on the host side, and 2948 * on the link side, recreates the controller association. 2949 */ 2950 static int 2951 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) 2952 { 2953 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2954 struct nvmefc_ls_rcv_op *disls = NULL; 2955 unsigned long flags; 2956 int ret; 2957 bool changed; 2958 2959 ++ctrl->ctrl.nr_reconnects; 2960 2961 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 2962 return -ENODEV; 2963 2964 if (nvme_fc_ctlr_active_on_rport(ctrl)) 2965 return -ENOTUNIQ; 2966 2967 dev_info(ctrl->ctrl.device, 2968 "NVME-FC{%d}: create association : host wwpn 0x%016llx " 2969 " rport wwpn 0x%016llx: NQN \"%s\"\n", 2970 ctrl->cnum, ctrl->lport->localport.port_name, 2971 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); 2972 2973 /* 2974 * Create the admin queue 2975 */ 2976 2977 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 2978 NVME_AQ_DEPTH); 2979 if (ret) 2980 goto out_free_queue; 2981 2982 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 2983 NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); 2984 if (ret) 2985 goto out_delete_hw_queue; 2986 2987 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 2988 if (ret) 2989 goto out_disconnect_admin_queue; 2990 2991 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 2992 2993 /* 2994 * Check controller capabilities 2995 * 2996 * todo:- add code to check if ctrl attributes changed from 2997 * prior connection values 2998 */ 2999 3000 ret = nvme_enable_ctrl(&ctrl->ctrl); 3001 if (ret) 3002 goto out_disconnect_admin_queue; 3003 3004 ctrl->ctrl.max_hw_sectors = 3005 (ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9); 3006 3007 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 3008 3009 ret = nvme_init_identify(&ctrl->ctrl); 3010 if (ret) 3011 goto out_disconnect_admin_queue; 3012 3013 /* sanity checks */ 3014 3015 /* FC-NVME does not have other data in the capsule */ 3016 if (ctrl->ctrl.icdoff) { 3017 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 3018 ctrl->ctrl.icdoff); 3019 goto out_disconnect_admin_queue; 3020 } 3021 3022 /* FC-NVME supports normal SGL Data Block Descriptors */ 3023 3024 if (opts->queue_size > ctrl->ctrl.maxcmd) { 3025 /* warn if maxcmd is lower than queue_size */ 3026 dev_warn(ctrl->ctrl.device, 3027 "queue_size %zu > ctrl maxcmd %u, reducing " 3028 "to maxcmd\n", 3029 opts->queue_size, ctrl->ctrl.maxcmd); 3030 opts->queue_size = ctrl->ctrl.maxcmd; 3031 } 3032 3033 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { 3034 /* warn if sqsize is lower than queue_size */ 3035 dev_warn(ctrl->ctrl.device, 3036 "queue_size %zu > ctrl sqsize %u, reducing " 3037 "to sqsize\n", 3038 opts->queue_size, ctrl->ctrl.sqsize + 1); 3039 opts->queue_size = ctrl->ctrl.sqsize + 1; 3040 } 3041 3042 ret = nvme_fc_init_aen_ops(ctrl); 3043 if (ret) 3044 goto out_term_aen_ops; 3045 3046 /* 3047 * Create the io queues 3048 */ 3049 3050 if (ctrl->ctrl.queue_count > 1) { 3051 if (!ctrl->ioq_live) 3052 ret = nvme_fc_create_io_queues(ctrl); 3053 else 3054 ret = nvme_fc_recreate_io_queues(ctrl); 3055 if (ret) 3056 goto out_term_aen_ops; 3057 } 3058 3059 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 3060 3061 ctrl->ctrl.nr_reconnects = 0; 3062 3063 if (changed) 3064 nvme_start_ctrl(&ctrl->ctrl); 3065 3066 return 0; /* Success */ 3067 3068 out_term_aen_ops: 3069 nvme_fc_term_aen_ops(ctrl); 3070 out_disconnect_admin_queue: 3071 /* send a Disconnect(association) LS to fc-nvme target */ 3072 nvme_fc_xmt_disconnect_assoc(ctrl); 3073 spin_lock_irqsave(&ctrl->lock, flags); 3074 ctrl->association_id = 0; 3075 disls = ctrl->rcv_disconn; 3076 ctrl->rcv_disconn = NULL; 3077 spin_unlock_irqrestore(&ctrl->lock, flags); 3078 if (disls) 3079 nvme_fc_xmt_ls_rsp(disls); 3080 out_delete_hw_queue: 3081 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3082 out_free_queue: 3083 nvme_fc_free_queue(&ctrl->queues[0]); 3084 clear_bit(ASSOC_ACTIVE, &ctrl->flags); 3085 nvme_fc_ctlr_inactive_on_rport(ctrl); 3086 3087 return ret; 3088 } 3089 3090 /* 3091 * This routine stops operation of the controller on the host side. 3092 * On the host os stack side: Admin and IO queues are stopped, 3093 * outstanding ios on them terminated via FC ABTS. 3094 * On the link side: the association is terminated. 3095 */ 3096 static void 3097 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 3098 { 3099 struct nvmefc_ls_rcv_op *disls = NULL; 3100 unsigned long flags; 3101 3102 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) 3103 return; 3104 3105 spin_lock_irqsave(&ctrl->lock, flags); 3106 set_bit(FCCTRL_TERMIO, &ctrl->flags); 3107 ctrl->iocnt = 0; 3108 spin_unlock_irqrestore(&ctrl->lock, flags); 3109 3110 /* 3111 * If io queues are present, stop them and terminate all outstanding 3112 * ios on them. As FC allocates FC exchange for each io, the 3113 * transport must contact the LLDD to terminate the exchange, 3114 * thus releasing the FC exchange. We use blk_mq_tagset_busy_itr() 3115 * to tell us what io's are busy and invoke a transport routine 3116 * to kill them with the LLDD. After terminating the exchange 3117 * the LLDD will call the transport's normal io done path, but it 3118 * will have an aborted status. The done path will return the 3119 * io requests back to the block layer as part of normal completions 3120 * (but with error status). 3121 */ 3122 if (ctrl->ctrl.queue_count > 1) { 3123 nvme_stop_queues(&ctrl->ctrl); 3124 blk_mq_tagset_busy_iter(&ctrl->tag_set, 3125 nvme_fc_terminate_exchange, &ctrl->ctrl); 3126 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); 3127 } 3128 3129 /* 3130 * Other transports, which don't have link-level contexts bound 3131 * to sqe's, would try to gracefully shutdown the controller by 3132 * writing the registers for shutdown and polling (call 3133 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially 3134 * just aborted and we will wait on those contexts, and given 3135 * there was no indication of how live the controlelr is on the 3136 * link, don't send more io to create more contexts for the 3137 * shutdown. Let the controller fail via keepalive failure if 3138 * its still present. 3139 */ 3140 3141 /* 3142 * clean up the admin queue. Same thing as above. 3143 * use blk_mq_tagset_busy_itr() and the transport routine to 3144 * terminate the exchanges. 3145 */ 3146 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 3147 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 3148 nvme_fc_terminate_exchange, &ctrl->ctrl); 3149 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); 3150 3151 /* kill the aens as they are a separate path */ 3152 nvme_fc_abort_aen_ops(ctrl); 3153 3154 /* wait for all io that had to be aborted */ 3155 spin_lock_irq(&ctrl->lock); 3156 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 3157 clear_bit(FCCTRL_TERMIO, &ctrl->flags); 3158 spin_unlock_irq(&ctrl->lock); 3159 3160 nvme_fc_term_aen_ops(ctrl); 3161 3162 /* 3163 * send a Disconnect(association) LS to fc-nvme target 3164 * Note: could have been sent at top of process, but 3165 * cleaner on link traffic if after the aborts complete. 3166 * Note: if association doesn't exist, association_id will be 0 3167 */ 3168 if (ctrl->association_id) 3169 nvme_fc_xmt_disconnect_assoc(ctrl); 3170 3171 spin_lock_irqsave(&ctrl->lock, flags); 3172 ctrl->association_id = 0; 3173 disls = ctrl->rcv_disconn; 3174 ctrl->rcv_disconn = NULL; 3175 spin_unlock_irqrestore(&ctrl->lock, flags); 3176 if (disls) 3177 /* 3178 * if a Disconnect Request was waiting for a response, send 3179 * now that all ABTS's have been issued (and are complete). 3180 */ 3181 nvme_fc_xmt_ls_rsp(disls); 3182 3183 if (ctrl->ctrl.tagset) { 3184 nvme_fc_delete_hw_io_queues(ctrl); 3185 nvme_fc_free_io_queues(ctrl); 3186 } 3187 3188 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 3189 nvme_fc_free_queue(&ctrl->queues[0]); 3190 3191 /* re-enable the admin_q so anything new can fast fail */ 3192 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); 3193 3194 /* resume the io queues so that things will fast fail */ 3195 nvme_start_queues(&ctrl->ctrl); 3196 3197 nvme_fc_ctlr_inactive_on_rport(ctrl); 3198 } 3199 3200 static void 3201 nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) 3202 { 3203 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 3204 3205 cancel_work_sync(&ctrl->err_work); 3206 cancel_delayed_work_sync(&ctrl->connect_work); 3207 /* 3208 * kill the association on the link side. this will block 3209 * waiting for io to terminate 3210 */ 3211 nvme_fc_delete_association(ctrl); 3212 } 3213 3214 static void 3215 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) 3216 { 3217 struct nvme_fc_rport *rport = ctrl->rport; 3218 struct nvme_fc_remote_port *portptr = &rport->remoteport; 3219 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; 3220 bool recon = true; 3221 3222 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) 3223 return; 3224 3225 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3226 dev_info(ctrl->ctrl.device, 3227 "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 3228 ctrl->cnum, status); 3229 else if (time_after_eq(jiffies, rport->dev_loss_end)) 3230 recon = false; 3231 3232 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { 3233 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3234 dev_info(ctrl->ctrl.device, 3235 "NVME-FC{%d}: Reconnect attempt in %ld " 3236 "seconds\n", 3237 ctrl->cnum, recon_delay / HZ); 3238 else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) 3239 recon_delay = rport->dev_loss_end - jiffies; 3240 3241 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); 3242 } else { 3243 if (portptr->port_state == FC_OBJSTATE_ONLINE) 3244 dev_warn(ctrl->ctrl.device, 3245 "NVME-FC{%d}: Max reconnect attempts (%d) " 3246 "reached.\n", 3247 ctrl->cnum, ctrl->ctrl.nr_reconnects); 3248 else 3249 dev_warn(ctrl->ctrl.device, 3250 "NVME-FC{%d}: dev_loss_tmo (%d) expired " 3251 "while waiting for remoteport connectivity.\n", 3252 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, 3253 (ctrl->ctrl.opts->max_reconnects * 3254 ctrl->ctrl.opts->reconnect_delay))); 3255 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); 3256 } 3257 } 3258 3259 static void 3260 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) 3261 { 3262 /* 3263 * if state is connecting - the error occurred as part of a 3264 * reconnect attempt. The create_association error paths will 3265 * clean up any outstanding io. 3266 * 3267 * if it's a different state - ensure all pending io is 3268 * terminated. Given this can delay while waiting for the 3269 * aborted io to return, we recheck adapter state below 3270 * before changing state. 3271 */ 3272 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { 3273 nvme_stop_keep_alive(&ctrl->ctrl); 3274 3275 /* will block will waiting for io to terminate */ 3276 nvme_fc_delete_association(ctrl); 3277 } 3278 3279 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && 3280 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) 3281 dev_err(ctrl->ctrl.device, 3282 "NVME-FC{%d}: error_recovery: Couldn't change state " 3283 "to CONNECTING\n", ctrl->cnum); 3284 } 3285 3286 static void 3287 nvme_fc_reset_ctrl_work(struct work_struct *work) 3288 { 3289 struct nvme_fc_ctrl *ctrl = 3290 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 3291 int ret; 3292 3293 __nvme_fc_terminate_io(ctrl); 3294 3295 nvme_stop_ctrl(&ctrl->ctrl); 3296 3297 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) 3298 ret = nvme_fc_create_association(ctrl); 3299 else 3300 ret = -ENOTCONN; 3301 3302 if (ret) 3303 nvme_fc_reconnect_or_delete(ctrl, ret); 3304 else 3305 dev_info(ctrl->ctrl.device, 3306 "NVME-FC{%d}: controller reset complete\n", 3307 ctrl->cnum); 3308 } 3309 3310 static void 3311 nvme_fc_connect_err_work(struct work_struct *work) 3312 { 3313 struct nvme_fc_ctrl *ctrl = 3314 container_of(work, struct nvme_fc_ctrl, err_work); 3315 3316 __nvme_fc_terminate_io(ctrl); 3317 3318 atomic_set(&ctrl->err_work_active, 0); 3319 3320 /* 3321 * Rescheduling the connection after recovering 3322 * from the io error is left to the reconnect work 3323 * item, which is what should have stalled waiting on 3324 * the io that had the error that scheduled this work. 3325 */ 3326 } 3327 3328 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 3329 .name = "fc", 3330 .module = THIS_MODULE, 3331 .flags = NVME_F_FABRICS, 3332 .reg_read32 = nvmf_reg_read32, 3333 .reg_read64 = nvmf_reg_read64, 3334 .reg_write32 = nvmf_reg_write32, 3335 .free_ctrl = nvme_fc_nvme_ctrl_freed, 3336 .submit_async_event = nvme_fc_submit_async_event, 3337 .delete_ctrl = nvme_fc_delete_ctrl, 3338 .get_address = nvmf_get_address, 3339 }; 3340 3341 static void 3342 nvme_fc_connect_ctrl_work(struct work_struct *work) 3343 { 3344 int ret; 3345 3346 struct nvme_fc_ctrl *ctrl = 3347 container_of(to_delayed_work(work), 3348 struct nvme_fc_ctrl, connect_work); 3349 3350 ret = nvme_fc_create_association(ctrl); 3351 if (ret) 3352 nvme_fc_reconnect_or_delete(ctrl, ret); 3353 else 3354 dev_info(ctrl->ctrl.device, 3355 "NVME-FC{%d}: controller connect complete\n", 3356 ctrl->cnum); 3357 } 3358 3359 3360 static const struct blk_mq_ops nvme_fc_admin_mq_ops = { 3361 .queue_rq = nvme_fc_queue_rq, 3362 .complete = nvme_fc_complete_rq, 3363 .init_request = nvme_fc_init_request, 3364 .exit_request = nvme_fc_exit_request, 3365 .init_hctx = nvme_fc_init_admin_hctx, 3366 .timeout = nvme_fc_timeout, 3367 }; 3368 3369 3370 /* 3371 * Fails a controller request if it matches an existing controller 3372 * (association) with the same tuple: 3373 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> 3374 * 3375 * The ports don't need to be compared as they are intrinsically 3376 * already matched by the port pointers supplied. 3377 */ 3378 static bool 3379 nvme_fc_existing_controller(struct nvme_fc_rport *rport, 3380 struct nvmf_ctrl_options *opts) 3381 { 3382 struct nvme_fc_ctrl *ctrl; 3383 unsigned long flags; 3384 bool found = false; 3385 3386 spin_lock_irqsave(&rport->lock, flags); 3387 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3388 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); 3389 if (found) 3390 break; 3391 } 3392 spin_unlock_irqrestore(&rport->lock, flags); 3393 3394 return found; 3395 } 3396 3397 static struct nvme_ctrl * 3398 nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, 3399 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) 3400 { 3401 struct nvme_fc_ctrl *ctrl; 3402 unsigned long flags; 3403 int ret, idx; 3404 3405 if (!(rport->remoteport.port_role & 3406 (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 3407 ret = -EBADR; 3408 goto out_fail; 3409 } 3410 3411 if (!opts->duplicate_connect && 3412 nvme_fc_existing_controller(rport, opts)) { 3413 ret = -EALREADY; 3414 goto out_fail; 3415 } 3416 3417 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); 3418 if (!ctrl) { 3419 ret = -ENOMEM; 3420 goto out_fail; 3421 } 3422 3423 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); 3424 if (idx < 0) { 3425 ret = -ENOSPC; 3426 goto out_free_ctrl; 3427 } 3428 3429 ctrl->ctrl.opts = opts; 3430 ctrl->ctrl.nr_reconnects = 0; 3431 if (lport->dev) 3432 ctrl->ctrl.numa_node = dev_to_node(lport->dev); 3433 else 3434 ctrl->ctrl.numa_node = NUMA_NO_NODE; 3435 INIT_LIST_HEAD(&ctrl->ctrl_list); 3436 ctrl->lport = lport; 3437 ctrl->rport = rport; 3438 ctrl->dev = lport->dev; 3439 ctrl->cnum = idx; 3440 ctrl->ioq_live = false; 3441 atomic_set(&ctrl->err_work_active, 0); 3442 init_waitqueue_head(&ctrl->ioabort_wait); 3443 3444 get_device(ctrl->dev); 3445 kref_init(&ctrl->ref); 3446 3447 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 3448 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 3449 INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work); 3450 spin_lock_init(&ctrl->lock); 3451 3452 /* io queue count */ 3453 ctrl->ctrl.queue_count = min_t(unsigned int, 3454 opts->nr_io_queues, 3455 lport->ops->max_hw_queues); 3456 ctrl->ctrl.queue_count++; /* +1 for admin queue */ 3457 3458 ctrl->ctrl.sqsize = opts->queue_size - 1; 3459 ctrl->ctrl.kato = opts->kato; 3460 ctrl->ctrl.cntlid = 0xffff; 3461 3462 ret = -ENOMEM; 3463 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, 3464 sizeof(struct nvme_fc_queue), GFP_KERNEL); 3465 if (!ctrl->queues) 3466 goto out_free_ida; 3467 3468 nvme_fc_init_queue(ctrl, 0); 3469 3470 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); 3471 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; 3472 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 3473 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ 3474 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 3475 ctrl->admin_tag_set.cmd_size = 3476 struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 3477 ctrl->lport->ops->fcprqst_priv_sz); 3478 ctrl->admin_tag_set.driver_data = ctrl; 3479 ctrl->admin_tag_set.nr_hw_queues = 1; 3480 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; 3481 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; 3482 3483 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); 3484 if (ret) 3485 goto out_free_queues; 3486 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; 3487 3488 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3489 if (IS_ERR(ctrl->ctrl.fabrics_q)) { 3490 ret = PTR_ERR(ctrl->ctrl.fabrics_q); 3491 goto out_free_admin_tag_set; 3492 } 3493 3494 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 3495 if (IS_ERR(ctrl->ctrl.admin_q)) { 3496 ret = PTR_ERR(ctrl->ctrl.admin_q); 3497 goto out_cleanup_fabrics_q; 3498 } 3499 3500 /* 3501 * Would have been nice to init io queues tag set as well. 3502 * However, we require interaction from the controller 3503 * for max io queue count before we can do so. 3504 * Defer this to the connect path. 3505 */ 3506 3507 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); 3508 if (ret) 3509 goto out_cleanup_admin_q; 3510 3511 /* at this point, teardown path changes to ref counting on nvme ctrl */ 3512 3513 spin_lock_irqsave(&rport->lock, flags); 3514 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 3515 spin_unlock_irqrestore(&rport->lock, flags); 3516 3517 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || 3518 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 3519 dev_err(ctrl->ctrl.device, 3520 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); 3521 goto fail_ctrl; 3522 } 3523 3524 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 3525 dev_err(ctrl->ctrl.device, 3526 "NVME-FC{%d}: failed to schedule initial connect\n", 3527 ctrl->cnum); 3528 goto fail_ctrl; 3529 } 3530 3531 flush_delayed_work(&ctrl->connect_work); 3532 3533 dev_info(ctrl->ctrl.device, 3534 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 3535 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 3536 3537 return &ctrl->ctrl; 3538 3539 fail_ctrl: 3540 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 3541 cancel_work_sync(&ctrl->ctrl.reset_work); 3542 cancel_work_sync(&ctrl->err_work); 3543 cancel_delayed_work_sync(&ctrl->connect_work); 3544 3545 ctrl->ctrl.opts = NULL; 3546 3547 /* initiate nvme ctrl ref counting teardown */ 3548 nvme_uninit_ctrl(&ctrl->ctrl); 3549 3550 /* Remove core ctrl ref. */ 3551 nvme_put_ctrl(&ctrl->ctrl); 3552 3553 /* as we're past the point where we transition to the ref 3554 * counting teardown path, if we return a bad pointer here, 3555 * the calling routine, thinking it's prior to the 3556 * transition, will do an rport put. Since the teardown 3557 * path also does a rport put, we do an extra get here to 3558 * so proper order/teardown happens. 3559 */ 3560 nvme_fc_rport_get(rport); 3561 3562 return ERR_PTR(-EIO); 3563 3564 out_cleanup_admin_q: 3565 blk_cleanup_queue(ctrl->ctrl.admin_q); 3566 out_cleanup_fabrics_q: 3567 blk_cleanup_queue(ctrl->ctrl.fabrics_q); 3568 out_free_admin_tag_set: 3569 blk_mq_free_tag_set(&ctrl->admin_tag_set); 3570 out_free_queues: 3571 kfree(ctrl->queues); 3572 out_free_ida: 3573 put_device(ctrl->dev); 3574 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 3575 out_free_ctrl: 3576 kfree(ctrl); 3577 out_fail: 3578 /* exit via here doesn't follow ctlr ref points */ 3579 return ERR_PTR(ret); 3580 } 3581 3582 3583 struct nvmet_fc_traddr { 3584 u64 nn; 3585 u64 pn; 3586 }; 3587 3588 static int 3589 __nvme_fc_parse_u64(substring_t *sstr, u64 *val) 3590 { 3591 u64 token64; 3592 3593 if (match_u64(sstr, &token64)) 3594 return -EINVAL; 3595 *val = token64; 3596 3597 return 0; 3598 } 3599 3600 /* 3601 * This routine validates and extracts the WWN's from the TRADDR string. 3602 * As kernel parsers need the 0x to determine number base, universally 3603 * build string to parse with 0x prefix before parsing name strings. 3604 */ 3605 static int 3606 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 3607 { 3608 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 3609 substring_t wwn = { name, &name[sizeof(name)-1] }; 3610 int nnoffset, pnoffset; 3611 3612 /* validate if string is one of the 2 allowed formats */ 3613 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 3614 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 3615 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 3616 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 3617 nnoffset = NVME_FC_TRADDR_OXNNLEN; 3618 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 3619 NVME_FC_TRADDR_OXNNLEN; 3620 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 3621 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 3622 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 3623 "pn-", NVME_FC_TRADDR_NNLEN))) { 3624 nnoffset = NVME_FC_TRADDR_NNLEN; 3625 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 3626 } else 3627 goto out_einval; 3628 3629 name[0] = '0'; 3630 name[1] = 'x'; 3631 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 3632 3633 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3634 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 3635 goto out_einval; 3636 3637 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 3638 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 3639 goto out_einval; 3640 3641 return 0; 3642 3643 out_einval: 3644 pr_warn("%s: bad traddr string\n", __func__); 3645 return -EINVAL; 3646 } 3647 3648 static struct nvme_ctrl * 3649 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) 3650 { 3651 struct nvme_fc_lport *lport; 3652 struct nvme_fc_rport *rport; 3653 struct nvme_ctrl *ctrl; 3654 struct nvmet_fc_traddr laddr = { 0L, 0L }; 3655 struct nvmet_fc_traddr raddr = { 0L, 0L }; 3656 unsigned long flags; 3657 int ret; 3658 3659 ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); 3660 if (ret || !raddr.nn || !raddr.pn) 3661 return ERR_PTR(-EINVAL); 3662 3663 ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); 3664 if (ret || !laddr.nn || !laddr.pn) 3665 return ERR_PTR(-EINVAL); 3666 3667 /* find the host and remote ports to connect together */ 3668 spin_lock_irqsave(&nvme_fc_lock, flags); 3669 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3670 if (lport->localport.node_name != laddr.nn || 3671 lport->localport.port_name != laddr.pn) 3672 continue; 3673 3674 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3675 if (rport->remoteport.node_name != raddr.nn || 3676 rport->remoteport.port_name != raddr.pn) 3677 continue; 3678 3679 /* if fail to get reference fall through. Will error */ 3680 if (!nvme_fc_rport_get(rport)) 3681 break; 3682 3683 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3684 3685 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); 3686 if (IS_ERR(ctrl)) 3687 nvme_fc_rport_put(rport); 3688 return ctrl; 3689 } 3690 } 3691 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3692 3693 pr_warn("%s: %s - %s combination not found\n", 3694 __func__, opts->traddr, opts->host_traddr); 3695 return ERR_PTR(-ENOENT); 3696 } 3697 3698 3699 static struct nvmf_transport_ops nvme_fc_transport = { 3700 .name = "fc", 3701 .module = THIS_MODULE, 3702 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 3703 .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, 3704 .create_ctrl = nvme_fc_create_ctrl, 3705 }; 3706 3707 /* Arbitrary successive failures max. With lots of subsystems could be high */ 3708 #define DISCOVERY_MAX_FAIL 20 3709 3710 static ssize_t nvme_fc_nvme_discovery_store(struct device *dev, 3711 struct device_attribute *attr, const char *buf, size_t count) 3712 { 3713 unsigned long flags; 3714 LIST_HEAD(local_disc_list); 3715 struct nvme_fc_lport *lport; 3716 struct nvme_fc_rport *rport; 3717 int failcnt = 0; 3718 3719 spin_lock_irqsave(&nvme_fc_lock, flags); 3720 restart: 3721 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3722 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3723 if (!nvme_fc_lport_get(lport)) 3724 continue; 3725 if (!nvme_fc_rport_get(rport)) { 3726 /* 3727 * This is a temporary condition. Upon restart 3728 * this rport will be gone from the list. 3729 * 3730 * Revert the lport put and retry. Anything 3731 * added to the list already will be skipped (as 3732 * they are no longer list_empty). Loops should 3733 * resume at rports that were not yet seen. 3734 */ 3735 nvme_fc_lport_put(lport); 3736 3737 if (failcnt++ < DISCOVERY_MAX_FAIL) 3738 goto restart; 3739 3740 pr_err("nvme_discovery: too many reference " 3741 "failures\n"); 3742 goto process_local_list; 3743 } 3744 if (list_empty(&rport->disc_list)) 3745 list_add_tail(&rport->disc_list, 3746 &local_disc_list); 3747 } 3748 } 3749 3750 process_local_list: 3751 while (!list_empty(&local_disc_list)) { 3752 rport = list_first_entry(&local_disc_list, 3753 struct nvme_fc_rport, disc_list); 3754 list_del_init(&rport->disc_list); 3755 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3756 3757 lport = rport->lport; 3758 /* signal discovery. Won't hurt if it repeats */ 3759 nvme_fc_signal_discovery_scan(lport, rport); 3760 nvme_fc_rport_put(rport); 3761 nvme_fc_lport_put(lport); 3762 3763 spin_lock_irqsave(&nvme_fc_lock, flags); 3764 } 3765 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3766 3767 return count; 3768 } 3769 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); 3770 3771 static struct attribute *nvme_fc_attrs[] = { 3772 &dev_attr_nvme_discovery.attr, 3773 NULL 3774 }; 3775 3776 static struct attribute_group nvme_fc_attr_group = { 3777 .attrs = nvme_fc_attrs, 3778 }; 3779 3780 static const struct attribute_group *nvme_fc_attr_groups[] = { 3781 &nvme_fc_attr_group, 3782 NULL 3783 }; 3784 3785 static struct class fc_class = { 3786 .name = "fc", 3787 .dev_groups = nvme_fc_attr_groups, 3788 .owner = THIS_MODULE, 3789 }; 3790 3791 static int __init nvme_fc_init_module(void) 3792 { 3793 int ret; 3794 3795 nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); 3796 if (!nvme_fc_wq) 3797 return -ENOMEM; 3798 3799 /* 3800 * NOTE: 3801 * It is expected that in the future the kernel will combine 3802 * the FC-isms that are currently under scsi and now being 3803 * added to by NVME into a new standalone FC class. The SCSI 3804 * and NVME protocols and their devices would be under this 3805 * new FC class. 3806 * 3807 * As we need something to post FC-specific udev events to, 3808 * specifically for nvme probe events, start by creating the 3809 * new device class. When the new standalone FC class is 3810 * put in place, this code will move to a more generic 3811 * location for the class. 3812 */ 3813 ret = class_register(&fc_class); 3814 if (ret) { 3815 pr_err("couldn't register class fc\n"); 3816 goto out_destroy_wq; 3817 } 3818 3819 /* 3820 * Create a device for the FC-centric udev events 3821 */ 3822 fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, 3823 "fc_udev_device"); 3824 if (IS_ERR(fc_udev_device)) { 3825 pr_err("couldn't create fc_udev device!\n"); 3826 ret = PTR_ERR(fc_udev_device); 3827 goto out_destroy_class; 3828 } 3829 3830 ret = nvmf_register_transport(&nvme_fc_transport); 3831 if (ret) 3832 goto out_destroy_device; 3833 3834 return 0; 3835 3836 out_destroy_device: 3837 device_destroy(&fc_class, MKDEV(0, 0)); 3838 out_destroy_class: 3839 class_unregister(&fc_class); 3840 out_destroy_wq: 3841 destroy_workqueue(nvme_fc_wq); 3842 3843 return ret; 3844 } 3845 3846 static void 3847 nvme_fc_delete_controllers(struct nvme_fc_rport *rport) 3848 { 3849 struct nvme_fc_ctrl *ctrl; 3850 3851 spin_lock(&rport->lock); 3852 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 3853 dev_warn(ctrl->ctrl.device, 3854 "NVME-FC{%d}: transport unloading: deleting ctrl\n", 3855 ctrl->cnum); 3856 nvme_delete_ctrl(&ctrl->ctrl); 3857 } 3858 spin_unlock(&rport->lock); 3859 } 3860 3861 static void 3862 nvme_fc_cleanup_for_unload(void) 3863 { 3864 struct nvme_fc_lport *lport; 3865 struct nvme_fc_rport *rport; 3866 3867 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 3868 list_for_each_entry(rport, &lport->endp_list, endp_list) { 3869 nvme_fc_delete_controllers(rport); 3870 } 3871 } 3872 } 3873 3874 static void __exit nvme_fc_exit_module(void) 3875 { 3876 unsigned long flags; 3877 bool need_cleanup = false; 3878 3879 spin_lock_irqsave(&nvme_fc_lock, flags); 3880 nvme_fc_waiting_to_unload = true; 3881 if (!list_empty(&nvme_fc_lport_list)) { 3882 need_cleanup = true; 3883 nvme_fc_cleanup_for_unload(); 3884 } 3885 spin_unlock_irqrestore(&nvme_fc_lock, flags); 3886 if (need_cleanup) { 3887 pr_info("%s: waiting for ctlr deletes\n", __func__); 3888 wait_for_completion(&nvme_fc_unload_proceed); 3889 pr_info("%s: ctrl deletes complete\n", __func__); 3890 } 3891 3892 nvmf_unregister_transport(&nvme_fc_transport); 3893 3894 ida_destroy(&nvme_fc_local_port_cnt); 3895 ida_destroy(&nvme_fc_ctrl_cnt); 3896 3897 device_destroy(&fc_class, MKDEV(0, 0)); 3898 class_unregister(&fc_class); 3899 destroy_workqueue(nvme_fc_wq); 3900 } 3901 3902 module_init(nvme_fc_init_module); 3903 module_exit(nvme_fc_exit_module); 3904 3905 MODULE_LICENSE("GPL v2"); 3906