1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Definitions for the AF_INET socket handler. 7 * 8 * Version: @(#)sock.h 1.0.4 05/13/93 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Florian La Roche <flla@stud.uni-sb.de> 14 * 15 * Fixes: 16 * Alan Cox : Volatiles in skbuff pointers. See 17 * skbuff comments. May be overdone, 18 * better to prove they can be removed 19 * than the reverse. 20 * Alan Cox : Added a zapped field for tcp to note 21 * a socket is reset and must stay shut up 22 * Alan Cox : New fields for options 23 * Pauline Middelink : identd support 24 * Alan Cox : Eliminate low level recv/recvfrom 25 * David S. Miller : New socket lookup architecture. 26 * Steve Whitehouse: Default routines for sock_ops 27 * Arnaldo C. Melo : removed net_pinfo, tp_pinfo and made 28 * protinfo be just a void pointer, as the 29 * protocol specific parts were moved to 30 * respective headers and ipv4/v6, etc now 31 * use private slabcaches for its socks 32 * Pedro Hortas : New flags field for socket options 33 * 34 * 35 * This program is free software; you can redistribute it and/or 36 * modify it under the terms of the GNU General Public License 37 * as published by the Free Software Foundation; either version 38 * 2 of the License, or (at your option) any later version. 39 */ 40 #ifndef _SOCK_H 41 #define _SOCK_H 42 43 #include <linux/hardirq.h> 44 #include <linux/kernel.h> 45 #include <linux/list.h> 46 #include <linux/list_nulls.h> 47 #include <linux/timer.h> 48 #include <linux/cache.h> 49 #include <linux/bitops.h> 50 #include <linux/lockdep.h> 51 #include <linux/netdevice.h> 52 #include <linux/skbuff.h> /* struct sk_buff */ 53 #include <linux/mm.h> 54 #include <linux/security.h> 55 #include <linux/slab.h> 56 #include <linux/uaccess.h> 57 #include <linux/page_counter.h> 58 #include <linux/memcontrol.h> 59 #include <linux/static_key.h> 60 #include <linux/sched.h> 61 62 #include <linux/filter.h> 63 #include <linux/rculist_nulls.h> 64 #include <linux/poll.h> 65 66 #include <linux/atomic.h> 67 #include <net/dst.h> 68 #include <net/checksum.h> 69 #include <net/tcp_states.h> 70 #include <linux/net_tstamp.h> 71 72 struct cgroup; 73 struct cgroup_subsys; 74 #ifdef CONFIG_NET 75 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); 76 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); 77 #else 78 static inline 79 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 80 { 81 return 0; 82 } 83 static inline 84 void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) 85 { 86 } 87 #endif 88 /* 89 * This structure really needs to be cleaned up. 90 * Most of it is for TCP, and not used by any of 91 * the other protocols. 92 */ 93 94 /* Define this to get the SOCK_DBG debugging facility. */ 95 #define SOCK_DEBUGGING 96 #ifdef SOCK_DEBUGGING 97 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \ 98 printk(KERN_DEBUG msg); } while (0) 99 #else 100 /* Validate arguments and do nothing */ 101 static inline __printf(2, 3) 102 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) 103 { 104 } 105 #endif 106 107 /* This is the per-socket lock. 
The spinlock provides a synchronization 108 * between user contexts and software interrupt processing, whereas the 109 * mini-semaphore synchronizes multiple users amongst themselves. 110 */ 111 typedef struct { 112 spinlock_t slock; 113 int owned; 114 wait_queue_head_t wq; 115 /* 116 * We express the mutex-alike socket_lock semantics 117 * to the lock validator by explicitly managing 118 * the slock as a lock variant (in addition to 119 * the slock itself): 120 */ 121 #ifdef CONFIG_DEBUG_LOCK_ALLOC 122 struct lockdep_map dep_map; 123 #endif 124 } socket_lock_t; 125 126 struct sock; 127 struct proto; 128 struct net; 129 130 typedef __u32 __bitwise __portpair; 131 typedef __u64 __bitwise __addrpair; 132 133 /** 134 * struct sock_common - minimal network layer representation of sockets 135 * @skc_daddr: Foreign IPv4 addr 136 * @skc_rcv_saddr: Bound local IPv4 addr 137 * @skc_hash: hash value used with various protocol lookup tables 138 * @skc_u16hashes: two u16 hash values used by UDP lookup tables 139 * @skc_dport: placeholder for inet_dport/tw_dport 140 * @skc_num: placeholder for inet_num/tw_num 141 * @skc_family: network address family 142 * @skc_state: Connection state 143 * @skc_reuse: %SO_REUSEADDR setting 144 * @skc_reuseport: %SO_REUSEPORT setting 145 * @skc_bound_dev_if: bound device index if != 0 146 * @skc_bind_node: bind hash linkage for various protocol lookup tables 147 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol 148 * @skc_prot: protocol handlers inside a network family 149 * @skc_net: reference to the network namespace of this socket 150 * @skc_node: main hash linkage for various protocol lookup tables 151 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol 152 * @skc_tx_queue_mapping: tx queue number for this connection 153 * @skc_flags: place holder for sk_flags 154 * %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, 155 * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings 156 * @skc_incoming_cpu: record/match cpu processing incoming packets 157 * @skc_refcnt: reference count 158 * 159 * This is the minimal network layer representation of sockets, the header 160 * for struct sock and struct inet_timewait_sock. 161 */ 162 struct sock_common { 163 /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned 164 * address on 64bit arches : cf INET_MATCH() 165 */ 166 union { 167 __addrpair skc_addrpair; 168 struct { 169 __be32 skc_daddr; 170 __be32 skc_rcv_saddr; 171 }; 172 }; 173 union { 174 unsigned int skc_hash; 175 __u16 skc_u16hashes[2]; 176 }; 177 /* skc_dport && skc_num must be grouped as well */ 178 union { 179 __portpair skc_portpair; 180 struct { 181 __be16 skc_dport; 182 __u16 skc_num; 183 }; 184 }; 185 186 unsigned short skc_family; 187 volatile unsigned char skc_state; 188 unsigned char skc_reuse:4; 189 unsigned char skc_reuseport:1; 190 unsigned char skc_ipv6only:1; 191 unsigned char skc_net_refcnt:1; 192 int skc_bound_dev_if; 193 union { 194 struct hlist_node skc_bind_node; 195 struct hlist_nulls_node skc_portaddr_node; 196 }; 197 struct proto *skc_prot; 198 possible_net_t skc_net; 199 200 #if IS_ENABLED(CONFIG_IPV6) 201 struct in6_addr skc_v6_daddr; 202 struct in6_addr skc_v6_rcv_saddr; 203 #endif 204 205 atomic64_t skc_cookie; 206 207 /* following fields are padding to force 208 * offset(struct sock, sk_refcnt) == 128 on 64bit arches 209 * assuming IPV6 is enabled. 
We use this padding differently 210 * for different kind of 'sockets' 211 */ 212 union { 213 unsigned long skc_flags; 214 struct sock *skc_listener; /* request_sock */ 215 struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */ 216 }; 217 /* 218 * fields between dontcopy_begin/dontcopy_end 219 * are not copied in sock_copy() 220 */ 221 /* private: */ 222 int skc_dontcopy_begin[0]; 223 /* public: */ 224 union { 225 struct hlist_node skc_node; 226 struct hlist_nulls_node skc_nulls_node; 227 }; 228 int skc_tx_queue_mapping; 229 union { 230 int skc_incoming_cpu; 231 u32 skc_rcv_wnd; 232 u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */ 233 }; 234 235 atomic_t skc_refcnt; 236 /* private: */ 237 int skc_dontcopy_end[0]; 238 union { 239 u32 skc_rxhash; 240 u32 skc_window_clamp; 241 u32 skc_tw_snd_nxt; /* struct tcp_timewait_sock */ 242 }; 243 /* public: */ 244 }; 245 246 struct cg_proto; 247 /** 248 * struct sock - network layer representation of sockets 249 * @__sk_common: shared layout with inet_timewait_sock 250 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN 251 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings 252 * @sk_lock: synchronizer 253 * @sk_rcvbuf: size of receive buffer in bytes 254 * @sk_wq: sock wait queue and async head 255 * @sk_rx_dst: receive input route used by early demux 256 * @sk_dst_cache: destination cache 257 * @sk_policy: flow policy 258 * @sk_receive_queue: incoming packets 259 * @sk_wmem_alloc: transmit queue bytes committed 260 * @sk_write_queue: Packet sending queue 261 * @sk_omem_alloc: "o" is "option" or "other" 262 * @sk_wmem_queued: persistent queue size 263 * @sk_forward_alloc: space allocated forward 264 * @sk_napi_id: id of the last napi context to receive data for sk 265 * @sk_ll_usec: usecs to busypoll when there is no data 266 * @sk_allocation: allocation mode 267 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) 268 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) 269 * @sk_sndbuf: size of send buffer in bytes 270 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets 271 * @sk_no_check_rx: allow zero checksum in RX packets 272 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) 273 * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) 274 * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) 275 * @sk_gso_max_size: Maximum GSO segment size to build 276 * @sk_gso_max_segs: Maximum number of GSO segments 277 * @sk_lingertime: %SO_LINGER l_linger setting 278 * @sk_backlog: always used with the per-socket spinlock held 279 * @sk_callback_lock: used with the callbacks in the end of this struct 280 * @sk_error_queue: rarely used 281 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, 282 * IPV6_ADDRFORM for instance) 283 * @sk_err: last error 284 * @sk_err_soft: errors that don't cause failure but are the cause of a 285 * persistent failure not just 'timed out' 286 * @sk_drops: raw/udp drops counter 287 * @sk_ack_backlog: current listen backlog 288 * @sk_max_ack_backlog: listen backlog set in listen() 289 * @sk_priority: %SO_PRIORITY setting 290 * @sk_cgrp_prioidx: socket group's priority map index 291 * @sk_type: socket type (%SOCK_STREAM, etc) 292 * @sk_protocol: which protocol this socket belongs in this network family 293 * @sk_peer_pid: &struct pid for this socket's peer 294 * @sk_peer_cred: %SO_PEERCRED setting 295 * @sk_rcvlowat: %SO_RCVLOWAT setting 296 * @sk_rcvtimeo: %SO_RCVTIMEO setting 297 * @sk_sndtimeo: %SO_SNDTIMEO setting 298 * @sk_txhash: computed flow hash for use on transmit 299 * @sk_filter: socket filtering instructions 300 * @sk_timer: sock cleanup timer 301 * @sk_stamp: time stamp of last packet received 302 * @sk_tsflags: SO_TIMESTAMPING socket options 303 * @sk_tskey: counter to disambiguate concurrent tstamp requests 304 * @sk_socket: Identd and reporting IO signals 305 * @sk_user_data: RPC layer private data 306 * @sk_frag: cached page frag 307 * @sk_peek_off: current peek_offset value 308 * @sk_send_head: front of stuff to transmit 309 * @sk_security: used by security modules 310 * @sk_mark: generic packet mark 311 * @sk_classid: this socket's cgroup classid 312 * @sk_cgrp: this socket's cgroup-specific proto data 313 * @sk_write_pending: a write to stream socket waits to start 314 * @sk_state_change: callback to indicate change in the state of the sock 315 * @sk_data_ready: callback to indicate there is data to be processed 316 * @sk_write_space: callback to indicate there is bf sending space available 317 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE) 318 * @sk_backlog_rcv: callback to process the backlog 319 * @sk_destruct: called at sock freeing time, i.e. 
when all refcnt == 0 320 */ 321 struct sock { 322 /* 323 * Now struct inet_timewait_sock also uses sock_common, so please just 324 * don't add nothing before this first member (__sk_common) --acme 325 */ 326 struct sock_common __sk_common; 327 #define sk_node __sk_common.skc_node 328 #define sk_nulls_node __sk_common.skc_nulls_node 329 #define sk_refcnt __sk_common.skc_refcnt 330 #define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping 331 332 #define sk_dontcopy_begin __sk_common.skc_dontcopy_begin 333 #define sk_dontcopy_end __sk_common.skc_dontcopy_end 334 #define sk_hash __sk_common.skc_hash 335 #define sk_portpair __sk_common.skc_portpair 336 #define sk_num __sk_common.skc_num 337 #define sk_dport __sk_common.skc_dport 338 #define sk_addrpair __sk_common.skc_addrpair 339 #define sk_daddr __sk_common.skc_daddr 340 #define sk_rcv_saddr __sk_common.skc_rcv_saddr 341 #define sk_family __sk_common.skc_family 342 #define sk_state __sk_common.skc_state 343 #define sk_reuse __sk_common.skc_reuse 344 #define sk_reuseport __sk_common.skc_reuseport 345 #define sk_ipv6only __sk_common.skc_ipv6only 346 #define sk_net_refcnt __sk_common.skc_net_refcnt 347 #define sk_bound_dev_if __sk_common.skc_bound_dev_if 348 #define sk_bind_node __sk_common.skc_bind_node 349 #define sk_prot __sk_common.skc_prot 350 #define sk_net __sk_common.skc_net 351 #define sk_v6_daddr __sk_common.skc_v6_daddr 352 #define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr 353 #define sk_cookie __sk_common.skc_cookie 354 #define sk_incoming_cpu __sk_common.skc_incoming_cpu 355 #define sk_flags __sk_common.skc_flags 356 #define sk_rxhash __sk_common.skc_rxhash 357 358 socket_lock_t sk_lock; 359 struct sk_buff_head sk_receive_queue; 360 /* 361 * The backlog queue is special, it is always used with 362 * the per-socket spinlock held and requires low latency 363 * access. Therefore we special case it's implementation. 364 * Note : rmem_alloc is in this structure to fill a hole 365 * on 64bit arches, not because its logically part of 366 * backlog. 
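 *
 * Illustrative sketch (hedged, not a definition from this header): a
 * protocol's packet receive path typically fills this backlog from
 * softirq context while a process owns the socket, roughly as follows;
 * the limit passed to sk_add_backlog() is protocol specific and
 * sk->sk_rcvbuf is used here only as an example value:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 *		rc = -ENOBUFS;
 *	bh_unlock_sock(sk);
 *
 * release_sock() later replays the queued skbs through sk_backlog_rcv()
 * once the lock owner is done.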
367 */ 368 struct { 369 atomic_t rmem_alloc; 370 int len; 371 struct sk_buff *head; 372 struct sk_buff *tail; 373 } sk_backlog; 374 #define sk_rmem_alloc sk_backlog.rmem_alloc 375 int sk_forward_alloc; 376 377 __u32 sk_txhash; 378 #ifdef CONFIG_NET_RX_BUSY_POLL 379 unsigned int sk_napi_id; 380 unsigned int sk_ll_usec; 381 #endif 382 atomic_t sk_drops; 383 int sk_rcvbuf; 384 385 struct sk_filter __rcu *sk_filter; 386 union { 387 struct socket_wq __rcu *sk_wq; 388 struct socket_wq *sk_wq_raw; 389 }; 390 #ifdef CONFIG_XFRM 391 struct xfrm_policy __rcu *sk_policy[2]; 392 #endif 393 struct dst_entry *sk_rx_dst; 394 struct dst_entry __rcu *sk_dst_cache; 395 /* Note: 32bit hole on 64bit arches */ 396 atomic_t sk_wmem_alloc; 397 atomic_t sk_omem_alloc; 398 int sk_sndbuf; 399 struct sk_buff_head sk_write_queue; 400 kmemcheck_bitfield_begin(flags); 401 unsigned int sk_shutdown : 2, 402 sk_no_check_tx : 1, 403 sk_no_check_rx : 1, 404 sk_userlocks : 4, 405 sk_protocol : 8, 406 sk_type : 16; 407 #define SK_PROTOCOL_MAX U8_MAX 408 kmemcheck_bitfield_end(flags); 409 int sk_wmem_queued; 410 gfp_t sk_allocation; 411 u32 sk_pacing_rate; /* bytes per second */ 412 u32 sk_max_pacing_rate; 413 netdev_features_t sk_route_caps; 414 netdev_features_t sk_route_nocaps; 415 int sk_gso_type; 416 unsigned int sk_gso_max_size; 417 u16 sk_gso_max_segs; 418 int sk_rcvlowat; 419 unsigned long sk_lingertime; 420 struct sk_buff_head sk_error_queue; 421 struct proto *sk_prot_creator; 422 rwlock_t sk_callback_lock; 423 int sk_err, 424 sk_err_soft; 425 u32 sk_ack_backlog; 426 u32 sk_max_ack_backlog; 427 __u32 sk_priority; 428 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) 429 __u32 sk_cgrp_prioidx; 430 #endif 431 struct pid *sk_peer_pid; 432 const struct cred *sk_peer_cred; 433 long sk_rcvtimeo; 434 long sk_sndtimeo; 435 struct timer_list sk_timer; 436 ktime_t sk_stamp; 437 u16 sk_tsflags; 438 u32 sk_tskey; 439 struct socket *sk_socket; 440 void *sk_user_data; 441 struct page_frag sk_frag; 442 struct sk_buff *sk_send_head; 443 __s32 sk_peek_off; 444 int sk_write_pending; 445 #ifdef CONFIG_SECURITY 446 void *sk_security; 447 #endif 448 __u32 sk_mark; 449 #ifdef CONFIG_CGROUP_NET_CLASSID 450 u32 sk_classid; 451 #endif 452 struct cg_proto *sk_cgrp; 453 void (*sk_state_change)(struct sock *sk); 454 void (*sk_data_ready)(struct sock *sk); 455 void (*sk_write_space)(struct sock *sk); 456 void (*sk_error_report)(struct sock *sk); 457 int (*sk_backlog_rcv)(struct sock *sk, 458 struct sk_buff *skb); 459 void (*sk_destruct)(struct sock *sk); 460 }; 461 462 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) 463 464 #define rcu_dereference_sk_user_data(sk) rcu_dereference(__sk_user_data((sk))) 465 #define rcu_assign_sk_user_data(sk, ptr) rcu_assign_pointer(__sk_user_data((sk)), ptr) 466 467 /* 468 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK 469 * or not whether his port will be reused by someone else. SK_FORCE_REUSE 470 * on a socket means that the socket will reuse everybody else's port 471 * without looking at the other's sk_reuse value. 
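 *
 * For example, sock_setsockopt(SO_REUSEADDR) maps the user's boolean onto
 * these values roughly like this (illustrative sketch of the core code):
 *
 *	sk->sk_reuse = valbool ? SK_CAN_REUSE : SK_NO_REUSE;
 *
 * while kernel-internal control sockets may set SK_FORCE_REUSE directly
 * so their bind() never fails because of an existing user socket.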
472 */ 473 474 #define SK_NO_REUSE 0 475 #define SK_CAN_REUSE 1 476 #define SK_FORCE_REUSE 2 477 478 static inline int sk_peek_offset(struct sock *sk, int flags) 479 { 480 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) 481 return sk->sk_peek_off; 482 else 483 return 0; 484 } 485 486 static inline void sk_peek_offset_bwd(struct sock *sk, int val) 487 { 488 if (sk->sk_peek_off >= 0) { 489 if (sk->sk_peek_off >= val) 490 sk->sk_peek_off -= val; 491 else 492 sk->sk_peek_off = 0; 493 } 494 } 495 496 static inline void sk_peek_offset_fwd(struct sock *sk, int val) 497 { 498 if (sk->sk_peek_off >= 0) 499 sk->sk_peek_off += val; 500 } 501 502 /* 503 * Hashed lists helper routines 504 */ 505 static inline struct sock *sk_entry(const struct hlist_node *node) 506 { 507 return hlist_entry(node, struct sock, sk_node); 508 } 509 510 static inline struct sock *__sk_head(const struct hlist_head *head) 511 { 512 return hlist_entry(head->first, struct sock, sk_node); 513 } 514 515 static inline struct sock *sk_head(const struct hlist_head *head) 516 { 517 return hlist_empty(head) ? NULL : __sk_head(head); 518 } 519 520 static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head) 521 { 522 return hlist_nulls_entry(head->first, struct sock, sk_nulls_node); 523 } 524 525 static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head) 526 { 527 return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head); 528 } 529 530 static inline struct sock *sk_next(const struct sock *sk) 531 { 532 return sk->sk_node.next ? 533 hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL; 534 } 535 536 static inline struct sock *sk_nulls_next(const struct sock *sk) 537 { 538 return (!is_a_nulls(sk->sk_nulls_node.next)) ? 539 hlist_nulls_entry(sk->sk_nulls_node.next, 540 struct sock, sk_nulls_node) : 541 NULL; 542 } 543 544 static inline bool sk_unhashed(const struct sock *sk) 545 { 546 return hlist_unhashed(&sk->sk_node); 547 } 548 549 static inline bool sk_hashed(const struct sock *sk) 550 { 551 return !sk_unhashed(sk); 552 } 553 554 static inline void sk_node_init(struct hlist_node *node) 555 { 556 node->pprev = NULL; 557 } 558 559 static inline void sk_nulls_node_init(struct hlist_nulls_node *node) 560 { 561 node->pprev = NULL; 562 } 563 564 static inline void __sk_del_node(struct sock *sk) 565 { 566 __hlist_del(&sk->sk_node); 567 } 568 569 /* NB: equivalent to hlist_del_init_rcu */ 570 static inline bool __sk_del_node_init(struct sock *sk) 571 { 572 if (sk_hashed(sk)) { 573 __sk_del_node(sk); 574 sk_node_init(&sk->sk_node); 575 return true; 576 } 577 return false; 578 } 579 580 /* Grab socket reference count. This operation is valid only 581 when sk is ALREADY grabbed f.e. it is found in hash table 582 or a list and the lookup is made under lock preventing hash table 583 modifications. 584 */ 585 586 static inline void sock_hold(struct sock *sk) 587 { 588 atomic_inc(&sk->sk_refcnt); 589 } 590 591 /* Ungrab socket in the context, which assumes that socket refcnt 592 cannot hit zero, f.e. it is true in context of any socketcall. 
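 *
 * A hedged usage sketch for sock_hold() above and the full sock_put()
 * declared later in this header (my_hash_lock, my_hash_list and port are
 * hypothetical): a lookup made under a lock that prevents unhashing may
 * safely take a reference before dropping that lock:
 *
 *	spin_lock(&my_hash_lock);
 *	sk_for_each(sk, &my_hash_list)
 *		if (sk->sk_num == port) {
 *			sock_hold(sk);
 *			break;
 *		}
 *	spin_unlock(&my_hash_lock);
 *	...
 *	if (sk)
 *		sock_put(sk);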
593 */ 594 static inline void __sock_put(struct sock *sk) 595 { 596 atomic_dec(&sk->sk_refcnt); 597 } 598 599 static inline bool sk_del_node_init(struct sock *sk) 600 { 601 bool rc = __sk_del_node_init(sk); 602 603 if (rc) { 604 /* paranoid for a while -acme */ 605 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); 606 __sock_put(sk); 607 } 608 return rc; 609 } 610 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) 611 612 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) 613 { 614 if (sk_hashed(sk)) { 615 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); 616 return true; 617 } 618 return false; 619 } 620 621 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) 622 { 623 bool rc = __sk_nulls_del_node_init_rcu(sk); 624 625 if (rc) { 626 /* paranoid for a while -acme */ 627 WARN_ON(atomic_read(&sk->sk_refcnt) == 1); 628 __sock_put(sk); 629 } 630 return rc; 631 } 632 633 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) 634 { 635 hlist_add_head(&sk->sk_node, list); 636 } 637 638 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) 639 { 640 sock_hold(sk); 641 __sk_add_node(sk, list); 642 } 643 644 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) 645 { 646 sock_hold(sk); 647 hlist_add_head_rcu(&sk->sk_node, list); 648 } 649 650 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 651 { 652 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 653 } 654 655 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 656 { 657 sock_hold(sk); 658 __sk_nulls_add_node_rcu(sk, list); 659 } 660 661 static inline void __sk_del_bind_node(struct sock *sk) 662 { 663 __hlist_del(&sk->sk_bind_node); 664 } 665 666 static inline void sk_add_bind_node(struct sock *sk, 667 struct hlist_head *list) 668 { 669 hlist_add_head(&sk->sk_bind_node, list); 670 } 671 672 #define sk_for_each(__sk, list) \ 673 hlist_for_each_entry(__sk, list, sk_node) 674 #define sk_for_each_rcu(__sk, list) \ 675 hlist_for_each_entry_rcu(__sk, list, sk_node) 676 #define sk_nulls_for_each(__sk, node, list) \ 677 hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) 678 #define sk_nulls_for_each_rcu(__sk, node, list) \ 679 hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) 680 #define sk_for_each_from(__sk) \ 681 hlist_for_each_entry_from(__sk, sk_node) 682 #define sk_nulls_for_each_from(__sk, node) \ 683 if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \ 684 hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node) 685 #define sk_for_each_safe(__sk, tmp, list) \ 686 hlist_for_each_entry_safe(__sk, tmp, list, sk_node) 687 #define sk_for_each_bound(__sk, list) \ 688 hlist_for_each_entry(__sk, list, sk_bind_node) 689 690 /** 691 * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset 692 * @tpos: the type * to use as a loop cursor. 693 * @pos: the &struct hlist_node to use as a loop cursor. 694 * @head: the head for your list. 695 * @offset: offset of hlist_node within the struct. 696 * 697 */ 698 #define sk_nulls_for_each_entry_offset(tpos, pos, head, offset) \ 699 for (pos = (head)->first; \ 700 (!is_a_nulls(pos)) && \ 701 ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \ 702 pos = pos->next) 703 704 static inline struct user_namespace *sk_user_ns(struct sock *sk) 705 { 706 /* Careful only use this in a context where these parameters 707 * can not change and must all be valid, such as recvmsg from 708 * userspace. 
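 *
 * For example (a hedged sketch; the credential reporting shown here is
 * only illustrative), a kuid_t can be translated into the namespace of
 * the process behind this socket before being copied to userspace:
 *
 *	uid_t uid = from_kuid_munged(sk_user_ns(sk), sock_i_uid(sk));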
709 */ 710 return sk->sk_socket->file->f_cred->user_ns; 711 } 712 713 /* Sock flags */ 714 enum sock_flags { 715 SOCK_DEAD, 716 SOCK_DONE, 717 SOCK_URGINLINE, 718 SOCK_KEEPOPEN, 719 SOCK_LINGER, 720 SOCK_DESTROY, 721 SOCK_BROADCAST, 722 SOCK_TIMESTAMP, 723 SOCK_ZAPPED, 724 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ 725 SOCK_DBG, /* %SO_DEBUG setting */ 726 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ 727 SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */ 728 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ 729 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ 730 SOCK_MEMALLOC, /* VM depends on this socket for swapping */ 731 SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */ 732 SOCK_FASYNC, /* fasync() active */ 733 SOCK_RXQ_OVFL, 734 SOCK_ZEROCOPY, /* buffers from userspace */ 735 SOCK_WIFI_STATUS, /* push wifi status to userspace */ 736 SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS. 737 * Will use last 4 bytes of packet sent from 738 * user-space instead. 739 */ 740 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ 741 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ 742 }; 743 744 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) 745 746 static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 747 { 748 nsk->sk_flags = osk->sk_flags; 749 } 750 751 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) 752 { 753 __set_bit(flag, &sk->sk_flags); 754 } 755 756 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) 757 { 758 __clear_bit(flag, &sk->sk_flags); 759 } 760 761 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) 762 { 763 return test_bit(flag, &sk->sk_flags); 764 } 765 766 #ifdef CONFIG_NET 767 extern struct static_key memalloc_socks; 768 static inline int sk_memalloc_socks(void) 769 { 770 return static_key_false(&memalloc_socks); 771 } 772 #else 773 774 static inline int sk_memalloc_socks(void) 775 { 776 return 0; 777 } 778 779 #endif 780 781 static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask) 782 { 783 return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC); 784 } 785 786 static inline void sk_acceptq_removed(struct sock *sk) 787 { 788 sk->sk_ack_backlog--; 789 } 790 791 static inline void sk_acceptq_added(struct sock *sk) 792 { 793 sk->sk_ack_backlog++; 794 } 795 796 static inline bool sk_acceptq_is_full(const struct sock *sk) 797 { 798 return sk->sk_ack_backlog > sk->sk_max_ack_backlog; 799 } 800 801 /* 802 * Compute minimal free write space needed to queue new packets. 803 */ 804 static inline int sk_stream_min_wspace(const struct sock *sk) 805 { 806 return sk->sk_wmem_queued >> 1; 807 } 808 809 static inline int sk_stream_wspace(const struct sock *sk) 810 { 811 return sk->sk_sndbuf - sk->sk_wmem_queued; 812 } 813 814 void sk_stream_write_space(struct sock *sk); 815 816 /* OOB backlog add */ 817 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) 818 { 819 /* dont let skb dst not refcounted, we are going to leave rcu lock */ 820 skb_dst_force_safe(skb); 821 822 if (!sk->sk_backlog.tail) 823 sk->sk_backlog.head = skb; 824 else 825 sk->sk_backlog.tail->next = skb; 826 827 sk->sk_backlog.tail = skb; 828 skb->next = NULL; 829 } 830 831 /* 832 * Take into account size of receive queue and backlog queue 833 * Do not take into account this skb truesize, 834 * to allow even a single big packet to come. 
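 *
 * A hedged usage sketch: a datagram protocol's rcv path may combine this
 * check with a socket state test before queueing or backlogging, e.g.
 *
 *	if (sock_flag(sk, SOCK_DEAD) ||
 *	    sk_rcvqueues_full(sk, sk->sk_rcvbuf))
 *		goto drop;
 *
 * where the limit (sk->sk_rcvbuf here) is a per-protocol choice.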
835 */ 836 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) 837 { 838 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); 839 840 return qsize > limit; 841 } 842 843 /* The per-socket spinlock must be held here. */ 844 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, 845 unsigned int limit) 846 { 847 if (sk_rcvqueues_full(sk, limit)) 848 return -ENOBUFS; 849 850 /* 851 * If the skb was allocated from pfmemalloc reserves, only 852 * allow SOCK_MEMALLOC sockets to use it as this socket is 853 * helping free memory 854 */ 855 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) 856 return -ENOMEM; 857 858 __sk_add_backlog(sk, skb); 859 sk->sk_backlog.len += skb->truesize; 860 return 0; 861 } 862 863 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb); 864 865 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 866 { 867 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) 868 return __sk_backlog_rcv(sk, skb); 869 870 return sk->sk_backlog_rcv(sk, skb); 871 } 872 873 static inline void sk_incoming_cpu_update(struct sock *sk) 874 { 875 sk->sk_incoming_cpu = raw_smp_processor_id(); 876 } 877 878 static inline void sock_rps_record_flow_hash(__u32 hash) 879 { 880 #ifdef CONFIG_RPS 881 struct rps_sock_flow_table *sock_flow_table; 882 883 rcu_read_lock(); 884 sock_flow_table = rcu_dereference(rps_sock_flow_table); 885 rps_record_sock_flow(sock_flow_table, hash); 886 rcu_read_unlock(); 887 #endif 888 } 889 890 static inline void sock_rps_record_flow(const struct sock *sk) 891 { 892 #ifdef CONFIG_RPS 893 sock_rps_record_flow_hash(sk->sk_rxhash); 894 #endif 895 } 896 897 static inline void sock_rps_save_rxhash(struct sock *sk, 898 const struct sk_buff *skb) 899 { 900 #ifdef CONFIG_RPS 901 if (unlikely(sk->sk_rxhash != skb->hash)) 902 sk->sk_rxhash = skb->hash; 903 #endif 904 } 905 906 static inline void sock_rps_reset_rxhash(struct sock *sk) 907 { 908 #ifdef CONFIG_RPS 909 sk->sk_rxhash = 0; 910 #endif 911 } 912 913 #define sk_wait_event(__sk, __timeo, __condition) \ 914 ({ int __rc; \ 915 release_sock(__sk); \ 916 __rc = __condition; \ 917 if (!__rc) { \ 918 *(__timeo) = schedule_timeout(*(__timeo)); \ 919 } \ 920 sched_annotate_sleep(); \ 921 lock_sock(__sk); \ 922 __rc = __condition; \ 923 __rc; \ 924 }) 925 926 int sk_stream_wait_connect(struct sock *sk, long *timeo_p); 927 int sk_stream_wait_memory(struct sock *sk, long *timeo_p); 928 void sk_stream_wait_close(struct sock *sk, long timeo_p); 929 int sk_stream_error(struct sock *sk, int flags, int err); 930 void sk_stream_kill_queues(struct sock *sk); 931 void sk_set_memalloc(struct sock *sk); 932 void sk_clear_memalloc(struct sock *sk); 933 934 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb); 935 936 struct request_sock_ops; 937 struct timewait_sock_ops; 938 struct inet_hashinfo; 939 struct raw_hashinfo; 940 struct module; 941 942 /* 943 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes 944 * un-modified. Special care is taken when initializing object to zero. 945 */ 946 static inline void sk_prot_clear_nulls(struct sock *sk, int size) 947 { 948 if (offsetof(struct sock, sk_node.next) != 0) 949 memset(sk, 0, offsetof(struct sock, sk_node.next)); 950 memset(&sk->sk_node.pprev, 0, 951 size - offsetof(struct sock, sk_node.pprev)); 952 } 953 954 /* Networking protocol blocks we attach to sockets. 
955 * socket layer -> transport layer interface 956 */ 957 struct proto { 958 void (*close)(struct sock *sk, 959 long timeout); 960 int (*connect)(struct sock *sk, 961 struct sockaddr *uaddr, 962 int addr_len); 963 int (*disconnect)(struct sock *sk, int flags); 964 965 struct sock * (*accept)(struct sock *sk, int flags, int *err); 966 967 int (*ioctl)(struct sock *sk, int cmd, 968 unsigned long arg); 969 int (*init)(struct sock *sk); 970 void (*destroy)(struct sock *sk); 971 void (*shutdown)(struct sock *sk, int how); 972 int (*setsockopt)(struct sock *sk, int level, 973 int optname, char __user *optval, 974 unsigned int optlen); 975 int (*getsockopt)(struct sock *sk, int level, 976 int optname, char __user *optval, 977 int __user *option); 978 #ifdef CONFIG_COMPAT 979 int (*compat_setsockopt)(struct sock *sk, 980 int level, 981 int optname, char __user *optval, 982 unsigned int optlen); 983 int (*compat_getsockopt)(struct sock *sk, 984 int level, 985 int optname, char __user *optval, 986 int __user *option); 987 int (*compat_ioctl)(struct sock *sk, 988 unsigned int cmd, unsigned long arg); 989 #endif 990 int (*sendmsg)(struct sock *sk, struct msghdr *msg, 991 size_t len); 992 int (*recvmsg)(struct sock *sk, struct msghdr *msg, 993 size_t len, int noblock, int flags, 994 int *addr_len); 995 int (*sendpage)(struct sock *sk, struct page *page, 996 int offset, size_t size, int flags); 997 int (*bind)(struct sock *sk, 998 struct sockaddr *uaddr, int addr_len); 999 1000 int (*backlog_rcv) (struct sock *sk, 1001 struct sk_buff *skb); 1002 1003 void (*release_cb)(struct sock *sk); 1004 1005 /* Keeping track of sk's, looking them up, and port selection methods. */ 1006 void (*hash)(struct sock *sk); 1007 void (*unhash)(struct sock *sk); 1008 void (*rehash)(struct sock *sk); 1009 int (*get_port)(struct sock *sk, unsigned short snum); 1010 void (*clear_sk)(struct sock *sk, int size); 1011 1012 /* Keeping track of sockets in use */ 1013 #ifdef CONFIG_PROC_FS 1014 unsigned int inuse_idx; 1015 #endif 1016 1017 bool (*stream_memory_free)(const struct sock *sk); 1018 /* Memory pressure */ 1019 void (*enter_memory_pressure)(struct sock *sk); 1020 atomic_long_t *memory_allocated; /* Current allocated memory. */ 1021 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 1022 /* 1023 * Pressure flag: try to collapse. 1024 * Technical note: it is used by multiple contexts non atomically. 1025 * All the __sk_mem_schedule() is of this nature: accounting 1026 * is strict, actions are advisory and have some latency. 1027 */ 1028 int *memory_pressure; 1029 long *sysctl_mem; 1030 int *sysctl_wmem; 1031 int *sysctl_rmem; 1032 int max_header; 1033 bool no_autobind; 1034 1035 struct kmem_cache *slab; 1036 unsigned int obj_size; 1037 int slab_flags; 1038 1039 struct percpu_counter *orphan_count; 1040 1041 struct request_sock_ops *rsk_prot; 1042 struct timewait_sock_ops *twsk_prot; 1043 1044 union { 1045 struct inet_hashinfo *hashinfo; 1046 struct udp_table *udp_table; 1047 struct raw_hashinfo *raw_hash; 1048 } h; 1049 1050 struct module *owner; 1051 1052 char name[32]; 1053 1054 struct list_head node; 1055 #ifdef SOCK_REFCNT_DEBUG 1056 atomic_t socks; 1057 #endif 1058 #ifdef CONFIG_MEMCG_KMEM 1059 /* 1060 * cgroup specific init/deinit functions. Called once for all 1061 * protocols that implement it, from cgroups populate function. 1062 * This function has to setup any files the protocol want to 1063 * appear in the kmem cgroup filesystem. 
1064 */ 1065 int (*init_cgroup)(struct mem_cgroup *memcg, 1066 struct cgroup_subsys *ss); 1067 void (*destroy_cgroup)(struct mem_cgroup *memcg); 1068 struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); 1069 #endif 1070 }; 1071 1072 int proto_register(struct proto *prot, int alloc_slab); 1073 void proto_unregister(struct proto *prot); 1074 1075 #ifdef SOCK_REFCNT_DEBUG 1076 static inline void sk_refcnt_debug_inc(struct sock *sk) 1077 { 1078 atomic_inc(&sk->sk_prot->socks); 1079 } 1080 1081 static inline void sk_refcnt_debug_dec(struct sock *sk) 1082 { 1083 atomic_dec(&sk->sk_prot->socks); 1084 printk(KERN_DEBUG "%s socket %p released, %d are still alive\n", 1085 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks)); 1086 } 1087 1088 static inline void sk_refcnt_debug_release(const struct sock *sk) 1089 { 1090 if (atomic_read(&sk->sk_refcnt) != 1) 1091 printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n", 1092 sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt)); 1093 } 1094 #else /* SOCK_REFCNT_DEBUG */ 1095 #define sk_refcnt_debug_inc(sk) do { } while (0) 1096 #define sk_refcnt_debug_dec(sk) do { } while (0) 1097 #define sk_refcnt_debug_release(sk) do { } while (0) 1098 #endif /* SOCK_REFCNT_DEBUG */ 1099 1100 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET) 1101 extern struct static_key memcg_socket_limit_enabled; 1102 static inline struct cg_proto *parent_cg_proto(struct proto *proto, 1103 struct cg_proto *cg_proto) 1104 { 1105 return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg)); 1106 } 1107 #define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled) 1108 #else 1109 #define mem_cgroup_sockets_enabled 0 1110 static inline struct cg_proto *parent_cg_proto(struct proto *proto, 1111 struct cg_proto *cg_proto) 1112 { 1113 return NULL; 1114 } 1115 #endif 1116 1117 static inline bool sk_stream_memory_free(const struct sock *sk) 1118 { 1119 if (sk->sk_wmem_queued >= sk->sk_sndbuf) 1120 return false; 1121 1122 return sk->sk_prot->stream_memory_free ? 
1123 sk->sk_prot->stream_memory_free(sk) : true; 1124 } 1125 1126 static inline bool sk_stream_is_writeable(const struct sock *sk) 1127 { 1128 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && 1129 sk_stream_memory_free(sk); 1130 } 1131 1132 1133 static inline bool sk_has_memory_pressure(const struct sock *sk) 1134 { 1135 return sk->sk_prot->memory_pressure != NULL; 1136 } 1137 1138 static inline bool sk_under_memory_pressure(const struct sock *sk) 1139 { 1140 if (!sk->sk_prot->memory_pressure) 1141 return false; 1142 1143 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1144 return !!sk->sk_cgrp->memory_pressure; 1145 1146 return !!*sk->sk_prot->memory_pressure; 1147 } 1148 1149 static inline void sk_leave_memory_pressure(struct sock *sk) 1150 { 1151 int *memory_pressure = sk->sk_prot->memory_pressure; 1152 1153 if (!memory_pressure) 1154 return; 1155 1156 if (*memory_pressure) 1157 *memory_pressure = 0; 1158 1159 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 1160 struct cg_proto *cg_proto = sk->sk_cgrp; 1161 struct proto *prot = sk->sk_prot; 1162 1163 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1164 cg_proto->memory_pressure = 0; 1165 } 1166 1167 } 1168 1169 static inline void sk_enter_memory_pressure(struct sock *sk) 1170 { 1171 if (!sk->sk_prot->enter_memory_pressure) 1172 return; 1173 1174 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 1175 struct cg_proto *cg_proto = sk->sk_cgrp; 1176 struct proto *prot = sk->sk_prot; 1177 1178 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1179 cg_proto->memory_pressure = 1; 1180 } 1181 1182 sk->sk_prot->enter_memory_pressure(sk); 1183 } 1184 1185 static inline long sk_prot_mem_limits(const struct sock *sk, int index) 1186 { 1187 long *prot = sk->sk_prot->sysctl_mem; 1188 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1189 prot = sk->sk_cgrp->sysctl_mem; 1190 return prot[index]; 1191 } 1192 1193 static inline void memcg_memory_allocated_add(struct cg_proto *prot, 1194 unsigned long amt, 1195 int *parent_status) 1196 { 1197 page_counter_charge(&prot->memory_allocated, amt); 1198 1199 if (page_counter_read(&prot->memory_allocated) > 1200 prot->memory_allocated.limit) 1201 *parent_status = OVER_LIMIT; 1202 } 1203 1204 static inline void memcg_memory_allocated_sub(struct cg_proto *prot, 1205 unsigned long amt) 1206 { 1207 page_counter_uncharge(&prot->memory_allocated, amt); 1208 } 1209 1210 static inline long 1211 sk_memory_allocated(const struct sock *sk) 1212 { 1213 struct proto *prot = sk->sk_prot; 1214 1215 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1216 return page_counter_read(&sk->sk_cgrp->memory_allocated); 1217 1218 return atomic_long_read(prot->memory_allocated); 1219 } 1220 1221 static inline long 1222 sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) 1223 { 1224 struct proto *prot = sk->sk_prot; 1225 1226 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 1227 memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); 1228 /* update the root cgroup regardless */ 1229 atomic_long_add_return(amt, prot->memory_allocated); 1230 return page_counter_read(&sk->sk_cgrp->memory_allocated); 1231 } 1232 1233 return atomic_long_add_return(amt, prot->memory_allocated); 1234 } 1235 1236 static inline void 1237 sk_memory_allocated_sub(struct sock *sk, int amt) 1238 { 1239 struct proto *prot = sk->sk_prot; 1240 1241 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1242 memcg_memory_allocated_sub(sk->sk_cgrp, amt); 1243 1244 atomic_long_sub(amt, prot->memory_allocated); 1245 } 1246 1247 static 
inline void sk_sockets_allocated_dec(struct sock *sk) 1248 { 1249 struct proto *prot = sk->sk_prot; 1250 1251 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 1252 struct cg_proto *cg_proto = sk->sk_cgrp; 1253 1254 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1255 percpu_counter_dec(&cg_proto->sockets_allocated); 1256 } 1257 1258 percpu_counter_dec(prot->sockets_allocated); 1259 } 1260 1261 static inline void sk_sockets_allocated_inc(struct sock *sk) 1262 { 1263 struct proto *prot = sk->sk_prot; 1264 1265 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) { 1266 struct cg_proto *cg_proto = sk->sk_cgrp; 1267 1268 for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto)) 1269 percpu_counter_inc(&cg_proto->sockets_allocated); 1270 } 1271 1272 percpu_counter_inc(prot->sockets_allocated); 1273 } 1274 1275 static inline int 1276 sk_sockets_allocated_read_positive(struct sock *sk) 1277 { 1278 struct proto *prot = sk->sk_prot; 1279 1280 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) 1281 return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated); 1282 1283 return percpu_counter_read_positive(prot->sockets_allocated); 1284 } 1285 1286 static inline int 1287 proto_sockets_allocated_sum_positive(struct proto *prot) 1288 { 1289 return percpu_counter_sum_positive(prot->sockets_allocated); 1290 } 1291 1292 static inline long 1293 proto_memory_allocated(struct proto *prot) 1294 { 1295 return atomic_long_read(prot->memory_allocated); 1296 } 1297 1298 static inline bool 1299 proto_memory_pressure(struct proto *prot) 1300 { 1301 if (!prot->memory_pressure) 1302 return false; 1303 return !!*prot->memory_pressure; 1304 } 1305 1306 1307 #ifdef CONFIG_PROC_FS 1308 /* Called with local bh disabled */ 1309 void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); 1310 int sock_prot_inuse_get(struct net *net, struct proto *proto); 1311 #else 1312 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, 1313 int inc) 1314 { 1315 } 1316 #endif 1317 1318 1319 /* With per-bucket locks this operation is not-atomic, so that 1320 * this version is not worse. 
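 *
 * A hedged sketch for the proc accounting declared above: protocols
 * typically call sock_prot_inuse_add() from their hash/unhash paths,
 * which run with BHs disabled, e.g.
 *
 *	local_bh_disable();
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 *	local_bh_enable();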
1321 */ 1322 static inline void __sk_prot_rehash(struct sock *sk) 1323 { 1324 sk->sk_prot->unhash(sk); 1325 sk->sk_prot->hash(sk); 1326 } 1327 1328 void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); 1329 1330 /* About 10 seconds */ 1331 #define SOCK_DESTROY_TIME (10*HZ) 1332 1333 /* Sockets 0-1023 can't be bound to unless you are superuser */ 1334 #define PROT_SOCK 1024 1335 1336 #define SHUTDOWN_MASK 3 1337 #define RCV_SHUTDOWN 1 1338 #define SEND_SHUTDOWN 2 1339 1340 #define SOCK_SNDBUF_LOCK 1 1341 #define SOCK_RCVBUF_LOCK 2 1342 #define SOCK_BINDADDR_LOCK 4 1343 #define SOCK_BINDPORT_LOCK 8 1344 1345 struct socket_alloc { 1346 struct socket socket; 1347 struct inode vfs_inode; 1348 }; 1349 1350 static inline struct socket *SOCKET_I(struct inode *inode) 1351 { 1352 return &container_of(inode, struct socket_alloc, vfs_inode)->socket; 1353 } 1354 1355 static inline struct inode *SOCK_INODE(struct socket *socket) 1356 { 1357 return &container_of(socket, struct socket_alloc, socket)->vfs_inode; 1358 } 1359 1360 /* 1361 * Functions for memory accounting 1362 */ 1363 int __sk_mem_schedule(struct sock *sk, int size, int kind); 1364 void __sk_mem_reclaim(struct sock *sk, int amount); 1365 1366 #define SK_MEM_QUANTUM ((int)PAGE_SIZE) 1367 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) 1368 #define SK_MEM_SEND 0 1369 #define SK_MEM_RECV 1 1370 1371 static inline int sk_mem_pages(int amt) 1372 { 1373 return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; 1374 } 1375 1376 static inline bool sk_has_account(struct sock *sk) 1377 { 1378 /* return true if protocol supports memory accounting */ 1379 return !!sk->sk_prot->memory_allocated; 1380 } 1381 1382 static inline bool sk_wmem_schedule(struct sock *sk, int size) 1383 { 1384 if (!sk_has_account(sk)) 1385 return true; 1386 return size <= sk->sk_forward_alloc || 1387 __sk_mem_schedule(sk, size, SK_MEM_SEND); 1388 } 1389 1390 static inline bool 1391 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) 1392 { 1393 if (!sk_has_account(sk)) 1394 return true; 1395 return size<= sk->sk_forward_alloc || 1396 __sk_mem_schedule(sk, size, SK_MEM_RECV) || 1397 skb_pfmemalloc(skb); 1398 } 1399 1400 static inline void sk_mem_reclaim(struct sock *sk) 1401 { 1402 if (!sk_has_account(sk)) 1403 return; 1404 if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) 1405 __sk_mem_reclaim(sk, sk->sk_forward_alloc); 1406 } 1407 1408 static inline void sk_mem_reclaim_partial(struct sock *sk) 1409 { 1410 if (!sk_has_account(sk)) 1411 return; 1412 if (sk->sk_forward_alloc > SK_MEM_QUANTUM) 1413 __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); 1414 } 1415 1416 static inline void sk_mem_charge(struct sock *sk, int size) 1417 { 1418 if (!sk_has_account(sk)) 1419 return; 1420 sk->sk_forward_alloc -= size; 1421 } 1422 1423 static inline void sk_mem_uncharge(struct sock *sk, int size) 1424 { 1425 if (!sk_has_account(sk)) 1426 return; 1427 sk->sk_forward_alloc += size; 1428 } 1429 1430 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) 1431 { 1432 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1433 sk->sk_wmem_queued -= skb->truesize; 1434 sk_mem_uncharge(sk, skb->truesize); 1435 __kfree_skb(skb); 1436 } 1437 1438 /* Used by processes to "lock" a socket state, so that 1439 * interrupts and bottom half handlers won't change it 1440 * from under us. It essentially blocks any incoming 1441 * packets, so that we won't get any new data or any 1442 * packets that change the state of the socket. 
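 *
 * A typical process-context pattern is therefore (illustrative only,
 * the work done under the lock is whatever the caller needs):
 *
 *	lock_sock(sk);
 *	... examine or modify sk state, walk sk_receive_queue, etc. ...
 *	release_sock(sk);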
1443 * 1444 * While locked, BH processing will add new packets to 1445 * the backlog queue. This queue is processed by the 1446 * owner of the socket lock right before it is released. 1447 * 1448 * Since ~2.3.5 it is also exclusive sleep lock serializing 1449 * accesses from user process context. 1450 */ 1451 #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) 1452 1453 static inline void sock_release_ownership(struct sock *sk) 1454 { 1455 sk->sk_lock.owned = 0; 1456 } 1457 1458 /* 1459 * Macro so as to not evaluate some arguments when 1460 * lockdep is not enabled. 1461 * 1462 * Mark both the sk_lock and the sk_lock.slock as a 1463 * per-address-family lock class. 1464 */ 1465 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ 1466 do { \ 1467 sk->sk_lock.owned = 0; \ 1468 init_waitqueue_head(&sk->sk_lock.wq); \ 1469 spin_lock_init(&(sk)->sk_lock.slock); \ 1470 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ 1471 sizeof((sk)->sk_lock)); \ 1472 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ 1473 (skey), (sname)); \ 1474 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ 1475 } while (0) 1476 1477 void lock_sock_nested(struct sock *sk, int subclass); 1478 1479 static inline void lock_sock(struct sock *sk) 1480 { 1481 lock_sock_nested(sk, 0); 1482 } 1483 1484 void release_sock(struct sock *sk); 1485 1486 /* BH context may only use the following locking interface. */ 1487 #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) 1488 #define bh_lock_sock_nested(__sk) \ 1489 spin_lock_nested(&((__sk)->sk_lock.slock), \ 1490 SINGLE_DEPTH_NESTING) 1491 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 1492 1493 bool lock_sock_fast(struct sock *sk); 1494 /** 1495 * unlock_sock_fast - complement of lock_sock_fast 1496 * @sk: socket 1497 * @slow: slow mode 1498 * 1499 * fast unlock socket for user context. 
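 *
 * Pairs with lock_sock_fast(); a hedged usage sketch (the critical
 * section must not sleep, since the spinlock may still be held):
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-sleeping critical section ...
 *	unlock_sock_fast(sk, slow);
 *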
1500 * If slow mode is on, we call regular release_sock() 1501 */ 1502 static inline void unlock_sock_fast(struct sock *sk, bool slow) 1503 { 1504 if (slow) 1505 release_sock(sk); 1506 else 1507 spin_unlock_bh(&sk->sk_lock.slock); 1508 } 1509 1510 1511 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 1512 struct proto *prot, int kern); 1513 void sk_free(struct sock *sk); 1514 void sk_destruct(struct sock *sk); 1515 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); 1516 1517 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 1518 gfp_t priority); 1519 void sock_wfree(struct sk_buff *skb); 1520 void skb_orphan_partial(struct sk_buff *skb); 1521 void sock_rfree(struct sk_buff *skb); 1522 void sock_efree(struct sk_buff *skb); 1523 #ifdef CONFIG_INET 1524 void sock_edemux(struct sk_buff *skb); 1525 #else 1526 #define sock_edemux(skb) sock_efree(skb) 1527 #endif 1528 1529 int sock_setsockopt(struct socket *sock, int level, int op, 1530 char __user *optval, unsigned int optlen); 1531 1532 int sock_getsockopt(struct socket *sock, int level, int op, 1533 char __user *optval, int __user *optlen); 1534 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 1535 int noblock, int *errcode); 1536 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 1537 unsigned long data_len, int noblock, 1538 int *errcode, int max_page_order); 1539 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); 1540 void sock_kfree_s(struct sock *sk, void *mem, int size); 1541 void sock_kzfree_s(struct sock *sk, void *mem, int size); 1542 void sk_send_sigurg(struct sock *sk); 1543 1544 struct sockcm_cookie { 1545 u32 mark; 1546 }; 1547 1548 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 1549 struct sockcm_cookie *sockc); 1550 1551 /* 1552 * Functions to fill in entries in struct proto_ops when a protocol 1553 * does not implement a particular function. 1554 */ 1555 int sock_no_bind(struct socket *, struct sockaddr *, int); 1556 int sock_no_connect(struct socket *, struct sockaddr *, int, int); 1557 int sock_no_socketpair(struct socket *, struct socket *); 1558 int sock_no_accept(struct socket *, struct socket *, int); 1559 int sock_no_getname(struct socket *, struct sockaddr *, int *, int); 1560 unsigned int sock_no_poll(struct file *, struct socket *, 1561 struct poll_table_struct *); 1562 int sock_no_ioctl(struct socket *, unsigned int, unsigned long); 1563 int sock_no_listen(struct socket *, int); 1564 int sock_no_shutdown(struct socket *, int); 1565 int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); 1566 int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); 1567 int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); 1568 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); 1569 int sock_no_mmap(struct file *file, struct socket *sock, 1570 struct vm_area_struct *vma); 1571 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, 1572 size_t size, int flags); 1573 1574 /* 1575 * Functions to fill in entries in struct proto_ops when a protocol 1576 * uses the inet style. 
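 *
 * A hedged sketch of how both helper families above are typically wired
 * into a protocol's ops table (myproto_ops and the protocol itself are
 * hypothetical; only the sock_no_* / sock_common_* entries are real, and
 * a complete table needs more members than shown here):
 *
 *	static const struct proto_ops myproto_ops = {
 *		.family		= PF_INET,
 *		.owner		= THIS_MODULE,
 *		.setsockopt	= sock_common_setsockopt,
 *		.getsockopt	= sock_common_getsockopt,
 *		.recvmsg	= sock_common_recvmsg,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *	};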
1577 */ 1578 int sock_common_getsockopt(struct socket *sock, int level, int optname, 1579 char __user *optval, int __user *optlen); 1580 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 1581 int flags); 1582 int sock_common_setsockopt(struct socket *sock, int level, int optname, 1583 char __user *optval, unsigned int optlen); 1584 int compat_sock_common_getsockopt(struct socket *sock, int level, 1585 int optname, char __user *optval, int __user *optlen); 1586 int compat_sock_common_setsockopt(struct socket *sock, int level, 1587 int optname, char __user *optval, unsigned int optlen); 1588 1589 void sk_common_release(struct sock *sk); 1590 1591 /* 1592 * Default socket callbacks and setup code 1593 */ 1594 1595 /* Initialise core socket variables */ 1596 void sock_init_data(struct socket *sock, struct sock *sk); 1597 1598 /* 1599 * Socket reference counting postulates. 1600 * 1601 * * Each user of socket SHOULD hold a reference count. 1602 * * Each access point to socket (an hash table bucket, reference from a list, 1603 * running timer, skb in flight MUST hold a reference count. 1604 * * When reference count hits 0, it means it will never increase back. 1605 * * When reference count hits 0, it means that no references from 1606 * outside exist to this socket and current process on current CPU 1607 * is last user and may/should destroy this socket. 1608 * * sk_free is called from any context: process, BH, IRQ. When 1609 * it is called, socket has no references from outside -> sk_free 1610 * may release descendant resources allocated by the socket, but 1611 * to the time when it is called, socket is NOT referenced by any 1612 * hash tables, lists etc. 1613 * * Packets, delivered from outside (from network or from another process) 1614 * and enqueued on receive/error queues SHOULD NOT grab reference count, 1615 * when they sit in queue. Otherwise, packets will leak to hole, when 1616 * socket is looked up by one cpu and unhasing is made by another CPU. 1617 * It is true for udp/raw, netlink (leak to receive and error queues), tcp 1618 * (leak to backlog). Packet socket does all the processing inside 1619 * BR_NETPROTO_LOCK, so that it has not this race condition. UNIX sockets 1620 * use separate SMP lock, so that they are prone too. 1621 */ 1622 1623 /* Ungrab socket and destroy it, if it was the last reference. */ 1624 static inline void sock_put(struct sock *sk) 1625 { 1626 if (atomic_dec_and_test(&sk->sk_refcnt)) 1627 sk_free(sk); 1628 } 1629 /* Generic version of sock_put(), dealing with all sockets 1630 * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...) 1631 */ 1632 void sock_gen_put(struct sock *sk); 1633 1634 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); 1635 1636 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) 1637 { 1638 sk->sk_tx_queue_mapping = tx_queue; 1639 } 1640 1641 static inline void sk_tx_queue_clear(struct sock *sk) 1642 { 1643 sk->sk_tx_queue_mapping = -1; 1644 } 1645 1646 static inline int sk_tx_queue_get(const struct sock *sk) 1647 { 1648 return sk ? sk->sk_tx_queue_mapping : -1; 1649 } 1650 1651 static inline void sk_set_socket(struct sock *sk, struct socket *sock) 1652 { 1653 sk_tx_queue_clear(sk); 1654 sk->sk_socket = sock; 1655 } 1656 1657 static inline wait_queue_head_t *sk_sleep(struct sock *sk) 1658 { 1659 BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0); 1660 return &rcu_dereference_raw(sk->sk_wq)->wait; 1661 } 1662 /* Detach socket from process context. 
1663 * Announce socket dead, detach it from wait queue and inode. 1664 * Note that parent inode held reference count on this struct sock, 1665 * we do not release it in this function, because protocol 1666 * probably wants some additional cleanups or even continuing 1667 * to work with this socket (TCP). 1668 */ 1669 static inline void sock_orphan(struct sock *sk) 1670 { 1671 write_lock_bh(&sk->sk_callback_lock); 1672 sock_set_flag(sk, SOCK_DEAD); 1673 sk_set_socket(sk, NULL); 1674 sk->sk_wq = NULL; 1675 write_unlock_bh(&sk->sk_callback_lock); 1676 } 1677 1678 static inline void sock_graft(struct sock *sk, struct socket *parent) 1679 { 1680 write_lock_bh(&sk->sk_callback_lock); 1681 sk->sk_wq = parent->wq; 1682 parent->sk = sk; 1683 sk_set_socket(sk, parent); 1684 security_sock_graft(sk, parent); 1685 write_unlock_bh(&sk->sk_callback_lock); 1686 } 1687 1688 kuid_t sock_i_uid(struct sock *sk); 1689 unsigned long sock_i_ino(struct sock *sk); 1690 1691 static inline u32 net_tx_rndhash(void) 1692 { 1693 u32 v = prandom_u32(); 1694 1695 return v ?: 1; 1696 } 1697 1698 static inline void sk_set_txhash(struct sock *sk) 1699 { 1700 sk->sk_txhash = net_tx_rndhash(); 1701 } 1702 1703 static inline void sk_rethink_txhash(struct sock *sk) 1704 { 1705 if (sk->sk_txhash) 1706 sk_set_txhash(sk); 1707 } 1708 1709 static inline struct dst_entry * 1710 __sk_dst_get(struct sock *sk) 1711 { 1712 return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || 1713 lockdep_is_held(&sk->sk_lock.slock)); 1714 } 1715 1716 static inline struct dst_entry * 1717 sk_dst_get(struct sock *sk) 1718 { 1719 struct dst_entry *dst; 1720 1721 rcu_read_lock(); 1722 dst = rcu_dereference(sk->sk_dst_cache); 1723 if (dst && !atomic_inc_not_zero(&dst->__refcnt)) 1724 dst = NULL; 1725 rcu_read_unlock(); 1726 return dst; 1727 } 1728 1729 static inline void dst_negative_advice(struct sock *sk) 1730 { 1731 struct dst_entry *ndst, *dst = __sk_dst_get(sk); 1732 1733 sk_rethink_txhash(sk); 1734 1735 if (dst && dst->ops->negative_advice) { 1736 ndst = dst->ops->negative_advice(dst); 1737 1738 if (ndst != dst) { 1739 rcu_assign_pointer(sk->sk_dst_cache, ndst); 1740 sk_tx_queue_clear(sk); 1741 } 1742 } 1743 } 1744 1745 static inline void 1746 __sk_dst_set(struct sock *sk, struct dst_entry *dst) 1747 { 1748 struct dst_entry *old_dst; 1749 1750 sk_tx_queue_clear(sk); 1751 /* 1752 * This can be called while sk is owned by the caller only, 1753 * with no state that can be checked in a rcu_dereference_check() cond 1754 */ 1755 old_dst = rcu_dereference_raw(sk->sk_dst_cache); 1756 rcu_assign_pointer(sk->sk_dst_cache, dst); 1757 dst_release(old_dst); 1758 } 1759 1760 static inline void 1761 sk_dst_set(struct sock *sk, struct dst_entry *dst) 1762 { 1763 struct dst_entry *old_dst; 1764 1765 sk_tx_queue_clear(sk); 1766 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); 1767 dst_release(old_dst); 1768 } 1769 1770 static inline void 1771 __sk_dst_reset(struct sock *sk) 1772 { 1773 __sk_dst_set(sk, NULL); 1774 } 1775 1776 static inline void 1777 sk_dst_reset(struct sock *sk) 1778 { 1779 sk_dst_set(sk, NULL); 1780 } 1781 1782 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1783 1784 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); 1785 1786 bool sk_mc_loop(struct sock *sk); 1787 1788 static inline bool sk_can_gso(const struct sock *sk) 1789 { 1790 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); 1791 } 1792 1793 void sk_setup_caps(struct sock *sk, struct dst_entry *dst); 1794 1795 static inline 
void sk_nocaps_add(struct sock *sk, netdev_features_t flags) 1796 { 1797 sk->sk_route_nocaps |= flags; 1798 sk->sk_route_caps &= ~flags; 1799 } 1800 1801 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, 1802 struct iov_iter *from, char *to, 1803 int copy, int offset) 1804 { 1805 if (skb->ip_summed == CHECKSUM_NONE) { 1806 __wsum csum = 0; 1807 if (csum_and_copy_from_iter(to, copy, &csum, from) != copy) 1808 return -EFAULT; 1809 skb->csum = csum_block_add(skb->csum, csum, offset); 1810 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { 1811 if (copy_from_iter_nocache(to, copy, from) != copy) 1812 return -EFAULT; 1813 } else if (copy_from_iter(to, copy, from) != copy) 1814 return -EFAULT; 1815 1816 return 0; 1817 } 1818 1819 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, 1820 struct iov_iter *from, int copy) 1821 { 1822 int err, offset = skb->len; 1823 1824 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), 1825 copy, offset); 1826 if (err) 1827 __skb_trim(skb, offset); 1828 1829 return err; 1830 } 1831 1832 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, 1833 struct sk_buff *skb, 1834 struct page *page, 1835 int off, int copy) 1836 { 1837 int err; 1838 1839 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, 1840 copy, skb->len); 1841 if (err) 1842 return err; 1843 1844 skb->len += copy; 1845 skb->data_len += copy; 1846 skb->truesize += copy; 1847 sk->sk_wmem_queued += copy; 1848 sk_mem_charge(sk, copy); 1849 return 0; 1850 } 1851 1852 /** 1853 * sk_wmem_alloc_get - returns write allocations 1854 * @sk: socket 1855 * 1856 * Returns sk_wmem_alloc minus initial offset of one 1857 */ 1858 static inline int sk_wmem_alloc_get(const struct sock *sk) 1859 { 1860 return atomic_read(&sk->sk_wmem_alloc) - 1; 1861 } 1862 1863 /** 1864 * sk_rmem_alloc_get - returns read allocations 1865 * @sk: socket 1866 * 1867 * Returns sk_rmem_alloc 1868 */ 1869 static inline int sk_rmem_alloc_get(const struct sock *sk) 1870 { 1871 return atomic_read(&sk->sk_rmem_alloc); 1872 } 1873 1874 /** 1875 * sk_has_allocations - check if allocations are outstanding 1876 * @sk: socket 1877 * 1878 * Returns true if socket has write or read allocations 1879 */ 1880 static inline bool sk_has_allocations(const struct sock *sk) 1881 { 1882 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); 1883 } 1884 1885 /** 1886 * wq_has_sleeper - check if there are any waiting processes 1887 * @wq: struct socket_wq 1888 * 1889 * Returns true if socket_wq has waiting processes 1890 * 1891 * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory 1892 * barrier call. They were added due to the race found within the tcp code. 1893 * 1894 * Consider the following TCP code paths: 1895 * 1896 * CPU1 CPU2 1897 * 1898 * sys_select receive packet 1899 * ... ... 1900 * __add_wait_queue update tp->rcv_nxt 1901 * ... ... 1902 * tp->rcv_nxt check sock_def_readable 1903 * ... { 1904 * schedule rcu_read_lock(); 1905 * wq = rcu_dereference(sk->sk_wq); 1906 * if (wq && waitqueue_active(&wq->wait)) 1907 * wake_up_interruptible(&wq->wait) 1908 * ... 1909 * } 1910 * 1911 * The race for TCP fires when the __add_wait_queue changes done by CPU1 stay 1912 * in its cache, and so does the tp->rcv_nxt update on CPU2's side. CPU1 1913 * could then end up calling schedule() and sleeping forever if there is no 1914 * more data on the socket.
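 *
 * A minimal pairing sketch (illustrative only, modeled on the
 * sock_def_readable() path shown above; not a definitive recipe):
 *
 *	wakeup side, after queueing new data:
 *		rcu_read_lock();
 *		wq = rcu_dereference(sk->sk_wq);
 *		if (wq_has_sleeper(wq))
 *			wake_up_interruptible_sync_poll(&wq->wait, POLLIN);
 *		rcu_read_unlock();
 *
 *	poll side, inside the protocol's ->poll() handler:
 *		sock_poll_wait(file, sk_sleep(sk), wait);
 *		followed by checking sk state/queues to build the poll mask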
1915 * 1916 */ 1917 static inline bool wq_has_sleeper(struct socket_wq *wq) 1918 { 1919 /* We need to be sure we are in sync with the 1920 * add_wait_queue modifications to the wait queue. 1921 * 1922 * This memory barrier is paired with the one in sock_poll_wait. 1923 */ 1924 smp_mb(); 1925 return wq && waitqueue_active(&wq->wait); 1926 } 1927 1928 /** 1929 * sock_poll_wait - place memory barrier behind the poll_wait call. 1930 * @filp: file 1931 * @wait_address: socket wait queue 1932 * @p: poll_table 1933 * 1934 * See the comments in the wq_has_sleeper function. 1935 */ 1936 static inline void sock_poll_wait(struct file *filp, 1937 wait_queue_head_t *wait_address, poll_table *p) 1938 { 1939 if (!poll_does_not_wait(p) && wait_address) { 1940 poll_wait(filp, wait_address, p); 1941 /* We need to be sure we are in sync with the 1942 * socket flags modification. 1943 * 1944 * This memory barrier is paired with the one in wq_has_sleeper. 1945 */ 1946 smp_mb(); 1947 } 1948 } 1949 1950 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) 1951 { 1952 if (sk->sk_txhash) { 1953 skb->l4_hash = 1; 1954 skb->hash = sk->sk_txhash; 1955 } 1956 } 1957 1958 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk); 1959 1960 /* 1961 * Queue a received datagram if it will fit. Stream and sequenced 1962 * protocols can't normally use this as they need to fit buffers in 1963 * and play with them. 1964 * 1965 * Inlined as it's very short and called for pretty much every 1966 * packet ever received. 1967 */ 1968 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) 1969 { 1970 skb_orphan(skb); 1971 skb->sk = sk; 1972 skb->destructor = sock_rfree; 1973 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 1974 sk_mem_charge(sk, skb->truesize); 1975 } 1976 1977 void sk_reset_timer(struct sock *sk, struct timer_list *timer, 1978 unsigned long expires); 1979 1980 void sk_stop_timer(struct sock *sk, struct timer_list *timer); 1981 1982 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1983 1984 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 1985 struct sk_buff *sock_dequeue_err_skb(struct sock *sk); 1986 1987 /* 1988 * Recover an error report and clear atomically 1989 */ 1990 1991 static inline int sock_error(struct sock *sk) 1992 { 1993 int err; 1994 if (likely(!sk->sk_err)) 1995 return 0; 1996 err = xchg(&sk->sk_err, 0); 1997 return -err; 1998 } 1999 2000 static inline unsigned long sock_wspace(struct sock *sk) 2001 { 2002 int amt = 0; 2003 2004 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 2005 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 2006 if (amt < 0) 2007 amt = 0; 2008 } 2009 return amt; 2010 } 2011 2012 /* Note: 2013 * We use sk->sk_wq_raw, from contexts knowing this 2014 * pointer is not NULL and cannot disappear/change. 2015 */ 2016 static inline void sk_set_bit(int nr, struct sock *sk) 2017 { 2018 set_bit(nr, &sk->sk_wq_raw->flags); 2019 } 2020 2021 static inline void sk_clear_bit(int nr, struct sock *sk) 2022 { 2023 clear_bit(nr, &sk->sk_wq_raw->flags); 2024 } 2025 2026 static inline void sk_wake_async(const struct sock *sk, int how, int band) 2027 { 2028 if (sock_flag(sk, SOCK_FASYNC)) { 2029 rcu_read_lock(); 2030 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); 2031 rcu_read_unlock(); 2032 } 2033 } 2034 2035 /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might 2036 * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
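 * As a rough worked example (the numbers are illustrative assumptions, since
 * they depend on the architecture and struct layouts): a 1500 byte frame
 * received into a 2048 byte data buffer, plus SKB_DATA_ALIGN(sizeof(struct
 * sk_buff)), charges well over 2KB of truesize to sk_rmem_alloc even though
 * only 1500 bytes are payload.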
2037 * Note: for send buffers, TCP works better if we can build two skbs at 2038 * minimum. 2039 */ 2040 #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff))) 2041 2042 #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2) 2043 #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE 2044 2045 static inline void sk_stream_moderate_sndbuf(struct sock *sk) 2046 { 2047 if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) { 2048 sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); 2049 sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF); 2050 } 2051 } 2052 2053 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, 2054 bool force_schedule); 2055 2056 /** 2057 * sk_page_frag - return an appropriate page_frag 2058 * @sk: socket 2059 * 2060 * If the socket allocation mode allows the current thread to sleep, it is 2061 * safe to use the per-task page_frag instead of the per-socket one. 2062 */ 2063 static inline struct page_frag *sk_page_frag(struct sock *sk) 2064 { 2065 if (gfpflags_allow_blocking(sk->sk_allocation)) 2066 return &current->task_frag; 2067 2068 return &sk->sk_frag; 2069 } 2070 2071 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag); 2072 2073 /* 2074 * Default write policy as shown to user space via poll/select/SIGIO 2075 */ 2076 static inline bool sock_writeable(const struct sock *sk) 2077 { 2078 return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); 2079 } 2080 2081 static inline gfp_t gfp_any(void) 2082 { 2083 return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; 2084 } 2085 2086 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) 2087 { 2088 return noblock ? 0 : sk->sk_rcvtimeo; 2089 } 2090 2091 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) 2092 { 2093 return noblock ? 0 : sk->sk_sndtimeo; 2094 } 2095 2096 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) 2097 { 2098 return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; 2099 } 2100 2101 /* Alas, with a timeout, socket operations are not restartable. 2102 * Compare this to poll(). 2103 */ 2104 static inline int sock_intr_errno(long timeo) 2105 { 2106 return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR; 2107 } 2108 2109 struct sock_skb_cb { 2110 u32 dropcount; 2111 }; 2112 2113 /* Store sock_skb_cb at the end of skb->cb[] so protocol families 2114 * using skb->cb[] can keep using it directly and utilize its 2115 * alignment guarantee.
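 *
 * Illustrative layout sketch (the sizes are assumptions, e.g. a 48 byte
 * cb[] and a 4 byte struct sock_skb_cb, not guarantees):
 *
 *	cb[0]  .. cb[43]	protocol private area (e.g. struct tcp_skb_cb)
 *	cb[44] .. cb[47]	struct sock_skb_cb (dropcount)
 *
 * sock_skb_cb_check_size() below lets a protocol assert at build time that
 * its private cb usage does not spill into this tail area.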
2116 */ 2117 #define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \ 2118 sizeof(struct sock_skb_cb))) 2119 2120 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \ 2121 SOCK_SKB_CB_OFFSET)) 2122 2123 #define sock_skb_cb_check_size(size) \ 2124 BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET) 2125 2126 static inline void 2127 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) 2128 { 2129 SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops); 2130 } 2131 2132 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, 2133 struct sk_buff *skb); 2134 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, 2135 struct sk_buff *skb); 2136 2137 static inline void 2138 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 2139 { 2140 ktime_t kt = skb->tstamp; 2141 struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); 2142 2143 /* 2144 * generate control messages if 2145 * - receive time stamping in software requested 2146 * - software time stamp available and wanted 2147 * - hardware time stamps available and wanted 2148 */ 2149 if (sock_flag(sk, SOCK_RCVTSTAMP) || 2150 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || 2151 (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || 2152 (hwtstamps->hwtstamp.tv64 && 2153 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) 2154 __sock_recv_timestamp(msg, sk, skb); 2155 else 2156 sk->sk_stamp = kt; 2157 2158 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid) 2159 __sock_recv_wifi_status(msg, sk, skb); 2160 } 2161 2162 void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2163 struct sk_buff *skb); 2164 2165 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, 2166 struct sk_buff *skb) 2167 { 2168 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ 2169 (1UL << SOCK_RCVTSTAMP)) 2170 #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \ 2171 SOF_TIMESTAMPING_RAW_HARDWARE) 2172 2173 if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY) 2174 __sock_recv_ts_and_drops(msg, sk, skb); 2175 else 2176 sk->sk_stamp = skb->tstamp; 2177 } 2178 2179 void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); 2180 2181 /** 2182 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped 2183 * @sk: socket sending this packet 2184 * @tx_flags: completed with instructions for time stamping 2185 * 2186 * Note : callers should take care of initial *tx_flags value (usually 0) 2187 */ 2188 static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) 2189 { 2190 if (unlikely(sk->sk_tsflags)) 2191 __sock_tx_timestamp(sk, tx_flags); 2192 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) 2193 *tx_flags |= SKBTX_WIFI_STATUS; 2194 } 2195 2196 /** 2197 * sk_eat_skb - Release a skb if it is no longer needed 2198 * @sk: socket to eat this skb from 2199 * @skb: socket buffer to eat 2200 * 2201 * This routine must be called with interrupts disabled or with the socket 2202 * locked so that the sk_buff queue operation is ok. 
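 *
 * Minimal usage sketch (illustrative only; msg_fully_copied() is a
 * hypothetical helper standing in for protocol-specific logic), consuming
 * the head of the receive queue with the socket locked:
 *
 *	lock_sock(sk);
 *	skb = skb_peek(&sk->sk_receive_queue);
 *	if (skb && msg_fully_copied(skb))
 *		sk_eat_skb(sk, skb);
 *	release_sock(sk);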
2203 */ 2204 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) 2205 { 2206 __skb_unlink(skb, &sk->sk_receive_queue); 2207 __kfree_skb(skb); 2208 } 2209 2210 static inline 2211 struct net *sock_net(const struct sock *sk) 2212 { 2213 return read_pnet(&sk->sk_net); 2214 } 2215 2216 static inline 2217 void sock_net_set(struct sock *sk, struct net *net) 2218 { 2219 write_pnet(&sk->sk_net, net); 2220 } 2221 2222 static inline struct sock *skb_steal_sock(struct sk_buff *skb) 2223 { 2224 if (skb->sk) { 2225 struct sock *sk = skb->sk; 2226 2227 skb->destructor = NULL; 2228 skb->sk = NULL; 2229 return sk; 2230 } 2231 return NULL; 2232 } 2233 2234 /* This helper checks if a socket is a full socket, 2235 * i.e. _not_ a timewait or request socket. 2236 */ 2237 static inline bool sk_fullsock(const struct sock *sk) 2238 { 2239 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); 2240 } 2241 2242 /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV socket. 2243 * SYNACK messages can be attached to either one (depending on SYNCOOKIE). 2244 */ 2245 static inline bool sk_listener(const struct sock *sk) 2246 { 2247 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); 2248 } 2249 2250 /** 2251 * sk_state_load - read sk->sk_state for lockless contexts 2252 * @sk: socket pointer 2253 * 2254 * Paired with sk_state_store(). Used in places where we do not hold the socket lock: 2255 * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ... 2256 */ 2257 static inline int sk_state_load(const struct sock *sk) 2258 { 2259 return smp_load_acquire(&sk->sk_state); 2260 } 2261 2262 /** 2263 * sk_state_store - update sk->sk_state 2264 * @sk: socket pointer 2265 * @newstate: new state 2266 * 2267 * Paired with sk_state_load(). Should be used in contexts where 2268 * state change might impact lockless readers. 2269 */ 2270 static inline void sk_state_store(struct sock *sk, int newstate) 2271 { 2272 smp_store_release(&sk->sk_state, newstate); 2273 } 2274 2275 void sock_enable_timestamp(struct sock *sk, int flag); 2276 int sock_get_timestamp(struct sock *, struct timeval __user *); 2277 int sock_get_timestampns(struct sock *, struct timespec __user *); 2278 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, 2279 int type); 2280 2281 bool sk_ns_capable(const struct sock *sk, 2282 struct user_namespace *user_ns, int cap); 2283 bool sk_capable(const struct sock *sk, int cap); 2284 bool sk_net_capable(const struct sock *sk, int cap); 2285 2286 extern __u32 sysctl_wmem_max; 2287 extern __u32 sysctl_rmem_max; 2288 2289 extern int sysctl_tstamp_allow_data; 2290 extern int sysctl_optmem_max; 2291 2292 extern __u32 sysctl_wmem_default; 2293 extern __u32 sysctl_rmem_default; 2294 2295 #endif /* _SOCK_H */ 2296