/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015 by Delphix. All rights reserved.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections. The fusion of two local TCP endpoints occurs
 * at connection establishment time. Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful. If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.
 *
 * Synchronization is handled by the squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue. This ensures that neither side
 * can disappear while the other side is still sending data. Flow
 * control information is manipulated outside the squeue, so the
 * tcp_non_sq_lock must be held when touching tcp_flow_stopped.
 */

/*
 * Setting this to false disables fusion altogether; loopback
 * connections then go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED. It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances. Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
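 *
 * For illustration (a rough sketch, not a complete call graph), the
 * fusion attempt is driven from the eager's state transition, and any
 * qualifying send afterwards bypasses TCP protocol processing:
 *
 *	eager reaches ESTABLISHED	-> tcp_fuse()
 *	sender writes data		-> tcp_fuse_output()
 *					   -> peer's tcp_rcv_list or
 *					      su_recv() upcall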
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcpha_t *tcpha)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	netstack_t *ns;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit conn_rcvbuf of the listener tcp,
	 * but we can't really use tcp_listener since we get here after
	 * sending up T_CONN_IND and tcp_tli_accept() may be called
	 * independently, at which point tcp_listener is cleared;
	 * this is why we use tcp_saved_listener. The listener itself
	 * is guaranteed to be around until tcp_accept_finish() is called
	 * on this eager -- this won't happen until we're done since we're
	 * inside the eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);
	/*
	 * Lookup the peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system. The zone check
	 * is applied accordingly for the loopback address, but not for
	 * the local address since we want fusion to happen across zones.
	 */
	if (connp->conn_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcpha, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcpha, ipst);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket. We also restrict
	 * fusion to endpoints of the same type (STREAMS or non-STREAMS).
	 * The squeue assignment of this eager tcp was done earlier at the
	 * time of SYN processing in ip_fanout_tcp{_v6}. Note that sharing
	 * a squeue by itself doesn't guarantee a safe condition to fuse,
	 * hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp) ||
	    IPCL_IS_NONSTR(connp) != IPCL_IS_NONSTR(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Due to IRE changes, the peer and this endpoint might not agree
	 * on tcp_loopback. We bail in that case.
	 */
	if (!peer_tcp->tcp_loopback) {
		TCP_STAT(tcps, tcp_fusion_unqualified);
		CONN_DEC_REF(peer_connp);
		return;
	}
	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 */
	ns = tcps->tcps_netstack;
	ipst = ns->netstack_ip;

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    tcp->tcp_xmit_head == NULL && peer_tcp->tcp_xmit_head == NULL) {
		mblk_t *mp = NULL;
		queue_t *peer_rq = peer_connp->conn_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp));
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 * The mblk might already exist if we are doing a re-fuse.
		 */
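		/*
		 * Illustrative note: a single byte of space suffices for
		 * these mblks, since tcp_fuse_rcv_drain() stores only the
		 * signal number in them (*mp->b_wptr++ = (uchar_t)SIGURG).
		 */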
		if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
			ASSERT(!IPCL_IS_NONSTR(peer_tcp->tcp_connp));

			if (tcp->tcp_fused_sigurg_mp == NULL) {
				if ((mp = allocb(1, BPRI_HI)) == NULL)
					goto failed;
				tcp->tcp_fused_sigurg_mp = mp;
			}

			if (peer_tcp->tcp_fused_sigurg_mp == NULL) {
				if ((mp = allocb(1, BPRI_HI)) == NULL)
					goto failed;
				peer_tcp->tcp_fused_sigurg_mp = mp;
			}

			if ((mp = allocb(sizeof (struct stroptions),
			    BPRI_HI)) == NULL)
				goto failed;
		}

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints. Having
		 * them set to non-zero values means asking for trouble,
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly set up.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * Set the receive buffer and max packet size for the
		 * active open tcp. The eager's values will be set in
		 * tcp_accept_finish().
		 */
		(void) tcp_rwnd_set(peer_tcp, peer_tcp->tcp_connp->conn_rcvbuf);

		/*
		 * Set the write offset value to zero since we won't
		 * be needing any room for TCP/IP headers.
		 */
		if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
			struct stroptions *stropt;

			DB_TYPE(mp) = M_SETOPTS;
			mp->b_wptr += sizeof (*stropt);

			stropt = (struct stroptions *)mp->b_rptr;
			stropt->so_flags = SO_WROFF | SO_MAXBLK;
			stropt->so_wroff = 0;
			stropt->so_maxblk = INFPSZ;

			/* Send the options up */
			putnext(peer_rq, mp);
		} else {
			struct sock_proto_props sopp;

			/* The peer is a non-STREAMS endpoint */
			ASSERT(IPCL_IS_TCP(peer_connp));

			sopp.sopp_flags = SOCKOPT_WROFF | SOCKOPT_MAXBLK;
			sopp.sopp_wroff = 0;
			sopp.sopp_maxblk = INFPSZ;
			(*peer_connp->conn_upcalls->su_set_proto_props)
			    (peer_connp->conn_upper_handle, &sopp);
		}
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);
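
	/*
	 * A rough sketch of the steps below: cancel any pending push
	 * timers, drain data still queued on either endpoint, lift any
	 * flow-control conditions, and resynchronize tha_seq/tha_ack in
	 * the header templates so that the regular TCP output path can
	 * generate consistent segments from here on.
	 */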
	/*
	 * Cancel any pending push timers.
	 */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	if (peer_tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
		peer_tcp->tcp_push_tid = 0;
	}

	/*
	 * Drain any pending data; note that in the case of a detached tcp,
	 * the draining will happen later after the tcp is unfused. For
	 * non-urgent data, this can be handled by the regular
	 * tcp_rcv_drain(). If we have urgent data sitting in the receive
	 * list, we will need to send up a SIGURG signal first before
	 * draining the data. All of this will be handled by the code in
	 * tcp_fuse_rcv_drain() when called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_connp->conn_rq, tcp,
		    &tcp->tcp_fused_sigurg_mp);
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_connp->conn_rq,
		    peer_tcp, &peer_tcp->tcp_fused_sigurg_mp);
	}

	/* Lift up any flow-control conditions */
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	/*
	 * Update tha_seq and tha_ack in the header template.
	 */
	tcp->tcp_tcpha->tha_seq = htonl(tcp->tcp_snxt);
	tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
	peer_tcp->tcp_tcpha->tha_seq = htonl(peer_tcp->tcp_snxt);
	peer_tcp->tcp_tcpha->tha_ack = htonl(peer_tcp->tcp_rnxt);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine used to handle urgent data sent by STREAMS-based
 * endpoints. This routine is called by tcp_fuse_output() for handling
 * non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);
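
	/*
	 * For illustration, the conversion performed below reuses the
	 * incoming message in place; only the leading protocol mblk is
	 * rewritten, the urgent payload itself is untouched:
	 *
	 *	before:	M_PROTO(T_EXDATA_REQ) -> M_DATA (urgent bytes)
	 *	after:	M_PROTO(T_EXDATA_IND) -> M_DATA (urgent bytes)
	 */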
	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer. For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode. This is similar to the
	 * urgent data handling in the regular tcp. We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app. There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp. If new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost. This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let the sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse the T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	TCPS_BUMP_MIB(tcps, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove any existing T_EXDATA_IND, keep the data which
		 * follows it and relink our list. Note that we don't modify
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	conn_t *connp = tcp->tcp_connp;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	conn_t *peer_connp = peer_tcp->tcp_connp;
	boolean_t flow_stopped, peer_data_queued = B_FALSE;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
	boolean_t push = B_TRUE;
	mblk_t *mp1 = mp;
	uint_t ip_hdr_len;
	uint32_t recv_size = send_size;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	netstack_t *ns = tcps->tcps_netstack;
	ip_stack_t *ipst = ns->netstack_ip;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	iaflags_t ixaflags = connp->conn_ixa->ixa_flags;
	boolean_t do_ipsec, hooks_out, hooks_in, ipobs_enabled;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(connp->conn_sqp == peer_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);

	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}
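
	/*
	 * From here on, any condition that fusion cannot handle jumps to
	 * the "unfuse" label below: the pair is unfused and we return
	 * B_FALSE so that the caller can fall back to the regular
	 * (unfused) TCP output path for this message.
	 */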
	/*
	 * Check for a minimum TTL policy mismatch on the connection, as
	 * this can change even after fusion. If we detect a mismatch,
	 * unfuse and let normal stack processing handle it.
	 */
	if (peer_connp->conn_min_ttl != 0 && peer_connp->conn_min_ttl >
	    connp->conn_xmit_ipp.ipp_unicast_hops) {
		goto unfuse;
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for the M_PCSIG mblk.
	 */
	if (urgent) {
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	/*
	 * Check that we are still using an IRE_LOCAL or IRE_LOOPBACK
	 * before further processing.
	 */
	if (!ip_output_verify_local(connp->conn_ixa))
		goto unfuse;

	/*
	 * Build the IP and TCP headers in case something needs them.
	 * Those cases are:
	 * 1. IPsec
	 * 2. IPobs
	 * 3. FW_HOOKS
	 *
	 * If tcp_xmit_mp() fails to dupb() the message, unfuse the
	 * connection and fall back to the regular path.
	 */
	if (ixaflags & IXAF_IS_IPV4) {
		do_ipsec = (ixaflags & IXAF_IPSEC_SECURE) ||
		    CONN_INBOUND_POLICY_PRESENT(peer_connp, ipss);

		hooks_out = HOOKS4_INTERESTED_LOOPBACK_OUT(ipst);
		hooks_in = HOOKS4_INTERESTED_LOOPBACK_IN(ipst);
		ipobs_enabled = (ipst->ips_ip4_observe.he_interested != 0);
	} else {
		do_ipsec = (ixaflags & IXAF_IPSEC_SECURE) ||
		    CONN_INBOUND_POLICY_PRESENT_V6(peer_connp, ipss);

		hooks_out = HOOKS6_INTERESTED_LOOPBACK_OUT(ipst);
		hooks_in = HOOKS6_INTERESTED_LOOPBACK_IN(ipst);
		ipobs_enabled = (ipst->ips_ip6_observe.he_interested != 0);
	}

	/* We do a logical 'or' for efficiency */
	if (ipobs_enabled | do_ipsec | hooks_in | hooks_out) {
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use the regular path */
			goto unfuse;

		/*
		 * Leave all IP-relevant processing to
		 * ip_output_process_local(), which handles IPsec, IPobs,
		 * and FW_HOOKS.
		 */
		mp1 = ip_output_process_local(mp1, connp->conn_ixa, hooks_out,
		    hooks_in, do_ipsec ? peer_connp : NULL);

		/* Bail if the message was dropped for any reason. */
		if (mp1 == NULL)
			goto unfuse;

		/*
		 * The data length might have been changed by FW_HOOKS.
		 * We assume that the first mblk contains the TCP/IP headers.
		 */
		if (hooks_in || hooks_out) {
			tcpha_t *tcpha;

			ip_hdr_len = (ixaflags & IXAF_IS_IPV4) ?
			    IPH_HDR_LENGTH((ipha_t *)mp1->b_rptr) :
			    ip_hdr_length_v6(mp1, (ip6_t *)mp1->b_rptr);

			tcpha = (tcpha_t *)&mp1->b_rptr[ip_hdr_len];
			ASSERT((uchar_t *)tcpha + sizeof (tcpha_t) <=
			    mp1->b_wptr);
			recv_size += htonl(tcpha->tha_seq) - tcp->tcp_snxt;
		}

		/*
		 * The message duplicated by tcp_xmit_mp is freed.
		 * Note: the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}
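
	/*
	 * A sketch of the two delivery paths taken below:
	 *
	 *	non-STREAMS peer, not detached:	su_recv() upcall into
	 *					the socket
	 *	otherwise:			tcp_rcv_enqueue() onto the
	 *					peer's tcp_rcv_list
	 *
	 * with urgent data special-cased in both branches.
	 */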
	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 *
	 * For non-STREAMS sockets we normally queue data directly in the
	 * socket by calling the su_recv upcall. However, if the peer is
	 * detached we use tcp_rcv_enqueue() instead. Queued data will be
	 * drained when the accept completes (in tcp_accept_finish()).
	 */
	if (IPCL_IS_NONSTR(peer_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		int error;
		int flags = 0;

		if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			flags = MSG_OOB;
			(*peer_connp->conn_upcalls->su_signal_oob)
			    (peer_connp->conn_upper_handle, 0);
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
		}
		if ((*peer_connp->conn_upcalls->su_recv)(
		    peer_connp->conn_upper_handle, mp, recv_size,
		    flags, &error, &push) < 0) {
			ASSERT(error != EOPNOTSUPP);
			peer_data_queued = B_TRUE;
		}
	} else {
		if (IPCL_IS_NONSTR(peer_connp) &&
		    (tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			/*
			 * We cannot deal with urgent pointers
			 * that arrive before the connection has been
			 * accept()ed.
			 */
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
			freemsg(mp);
			return (B_TRUE);
		}

		tcp_rcv_enqueue(peer_tcp, mp, recv_size,
		    tcp->tcp_connp->conn_cred);

		/* In case it wrapped around and also to keep it constant */
		peer_tcp->tcp_rwnd += recv_size;
	}

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or when data is
	 * consumed. If the peer endpoint is detached, we emulate streams
	 * flow control by checking the peer's queue size and high water
	 * mark; otherwise we simply use canputnext() to decide whether we
	 * need to stop our flow.
	 *
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.
	 */
	mutex_enter(&tcp->tcp_non_sq_lock);
	flow_stopped = tcp->tcp_flow_stopped;
	if ((TCP_IS_DETACHED(peer_tcp) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_connp->conn_rcvbuf)) ||
	    (!TCP_IS_DETACHED(peer_tcp) &&
	    !IPCL_IS_NONSTR(peer_connp) && !canputnext(peer_connp->conn_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE3(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat)) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_last_recv_len = recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
	TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
	TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, send_size);
	tcp->tcp_cs.tcp_out_data_bytes += send_size;
	tcp->tcp_cs.tcp_out_data_segs++;

	TCPS_BUMP_MIB(tcps, tcpHCInSegs);
	TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs);
	TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, send_size);
	peer_tcp->tcp_cs.tcp_in_data_inorder_bytes += send_size;
	peer_tcp->tcp_cs.tcp_in_data_inorder_segs++;
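
	/*
	 * Fire both the send and receive DTrace probes: with fusion, a
	 * single enqueue serves as the transmit on our side and the
	 * receive on the peer's side, and the data never takes the form
	 * of a wire packet, hence the NULL header arguments.
	 */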
	DTRACE_TCP5(send, void, NULL, ip_xmit_attr_t *, connp->conn_ixa,
	    __dtrace_tcp_void_ip_t *, NULL, tcp_t *, tcp,
	    __dtrace_tcp_tcph_t *, NULL);
	DTRACE_TCP5(receive, void, NULL, ip_xmit_attr_t *,
	    peer_connp->conn_ixa, __dtrace_tcp_void_ip_t *, NULL,
	    tcp_t *, peer_tcp, __dtrace_tcp_tcph_t *, NULL);

	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or
		 * if we're not flow-controlled.
		 */
		if (urgent || !flow_stopped) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us. That thread will ensure that the
			 * correct peer_connp->conn_rq is globally visible
			 * before peer_tcp->tcp_detached is visible as clear,
			 * but we must also ensure that the load of conn_rq
			 * cannot be reordered to be before the tcp_detached
			 * check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_connp->conn_rq, peer_tcp,
			    NULL);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream earlier.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
	conn_t *connp = tcp->tcp_connp;

#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t *tcps = tcp->tcp_tcps;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(IPCL_IS_NONSTR(connp) || sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));

		tcp->tcp_fused_sigurg = B_FALSE;
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp,
			    tcp_push_timer, tcps->tcps_push_timer_interval);
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);
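
		/*
		 * The M_PCSIG mblk sent above carries only the signal
		 * number; the stream head turns it into a SIGURG delivery
		 * to the receiving process.
		 */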
		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		ASSERT(!IPCL_IS_NONSTR(connp));
		putnext(q, mp);
		TCP_STAT(tcps, tcp_fusion_putnext);
	}

#ifdef DEBUG
	ASSERT(cnt == tcp->tcp_rcv_cnt);
#endif
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_rwnd = tcp->tcp_connp->conn_rcvbuf;

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_connp->conn_sndlowat)) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	return (B_TRUE);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t *tcps = tcp->tcp_tcps;
	uint32_t max_win;

	ASSERT(tcp->tcp_fused);

	/* Ensure that the value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;
	/*
	 * Round up to the system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	max_win = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_win) {
		rwnd = max_win - (max_win % tcp->tcp_mss);
		if (rwnd < tcp->tcp_mss)
			rwnd = max_win;
	}

	/*
	 * Record the high water mark; this is used for flow-control
	 * purposes in tcp_fuse_output().
	 */
	tcp->tcp_connp->conn_rcvbuf = rwnd;
	tcp->tcp_rwnd = rwnd;
	return (rwnd);
}

/*
 * Calculate the maximum outstanding unread data block for a fused tcp
 * endpoint.
 */
int
tcp_fuse_maxpsz(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	conn_t *connp = tcp->tcp_connp;
	uint_t sndbuf = connp->conn_sndbuf;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_connp->conn_rcvbuf != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for more accurate flow-
	 * control accounting. Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to the system page size due to the lack
	 * of a TCP MSS concept in Fusion.
	 */
	if (maxpsz > peer_tcp->tcp_connp->conn_rcvbuf)
		maxpsz = peer_tcp->tcp_connp->conn_rcvbuf;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;

	return (maxpsz);
}
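
/*
 * For illustration (assuming hypothetical buffer sizes): with a 48K send
 * buffer and a 64K peer receive buffer, tcp_fuse_maxpsz() takes
 * min(48K, 64K) = 48K, rounds it up to a page boundary (a no-op here for
 * 4K or 8K pages) and halves it, yielding a 24K maxpsz.
 */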

/*
 * Called to release flow control.
 */
void
tcp_fuse_backenable(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!TCP_IS_DETACHED(tcp));
	ASSERT(tcp->tcp_connp->conn_sqp ==
	    peer_tcp->tcp_connp->conn_sqp);

	if (tcp->tcp_rcv_list != NULL)
		(void) tcp_fuse_rcv_drain(tcp->tcp_connp->conn_rq, tcp, NULL);

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped &&
	    (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_connp->conn_sndlowat)) {
		tcp_clrqfull(peer_tcp);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	TCP_STAT(tcp->tcp_tcps, tcp_fusion_backenabled);
}