xref: /linux/net/ipv4/tcp_metrics.c (revision ab520be8cd5d56867fc95cfbc34b90880faf1f9d)
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

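/* One cache entry per (source, destination, netns) triple.  Entries hang
 * off a hash bucket in an RCU-protected singly linked list and are freed
 * via rcu_head after a grace period.
 */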
struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_ts;
	u32				tcpm_ts_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);

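/* (Re)seed a cache entry from the route: record which metrics the route
 * has locked, copy SSTHRESH/CWND/REORDERING verbatim, and convert
 * RTT/RTTVAR from the dst's msec resolution to the usec resolution used
 * internally.
 */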
static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	tm->tcpm_ts = 0;
	tm->tcpm_ts_stamp = 0;
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}

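/* Entries untouched for an hour are considered stale and get re-seeded
 * from the current route metrics on their next use.
 */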
#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

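/* Create a new cache entry for saddr/daddr or, when the lookup returned
 * TCP_METRICS_RECLAIM_PTR (chain too deep), recycle the oldest entry of
 * the chain in place.  Runs under tcp_metrics_lock.
 */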
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

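/* Encode a lookup result: a found entry is returned as-is; a miss in a
 * chain deeper than TCP_METRICS_RECLAIM_DEPTH returns the
 * TCP_METRICS_RECLAIM_PTR sentinel so the caller recycles the oldest
 * entry instead of growing the chain; otherwise NULL means not found.
 */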
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

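/* All lookup paths hash the remote address, mix in the netns with
 * net_hash_mix() so namespaces do not share buckets, and fold the result
 * down to tcp_metrics_hash_log bits with hash_32().
 */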
static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (tw->tw_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
		hash = ipv4_addr_hash(tw->tw_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (tw->tw_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
			hash = ipv4_addr_hash(tw->tw_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = twsk_net(tw);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	return tm;
}

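/* Main lookup for full sockets.  With create == true a missing entry is
 * allocated (or reclaimed) via tcpm_new(); otherwise a miss, including
 * the reclaim sentinel, is reported as NULL.
 */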
static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	if (sysctl_tcp_nometrics_save || !dst)
		return;

	if (dst->flags & DST_HOST)
		dst_confirm(dst);

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably, no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated RTT is larger than the stored one,
	 * store the new one.  Otherwise, use an EWMA.  Remember, RTT
	 * overestimation is always better than underestimation.
	 */
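	/* Example: cached RTT 100000 us with srtt 60000 us gives m = 40000,
	 * so the stored value decays to 100000 - (40000 >> 3) = 95000 us.
	 * The EWMA path only applies when the sample is below the cached
	 * value; a larger sample replaces the cached RTT outright.
	 */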
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tp->snd_cwnd >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_cwnd >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tp->snd_cwnd > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tp->snd_cwnd);
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is not meaningful,
		 * and ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	if (!dst)
		goto reset;

	dst_confirm(dst);

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * the 3WHS.  Restore it back to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val) {
		tcp_disable_fack(tp);
		tcp_disable_early_retrans(tp);
		tp->reordering = val;
	}

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * to seed the RTO for later data packets because SYN packets are
	 * small. Use the per-dst cached values to seed the RTO but keep
	 * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
	 * Later the RTO will be updated immediately upon obtaining the first
	 * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
	 * influences the first RTO but not later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory: RTT is the time from sending a "normal" sized
	 * packet until it is ACKed.  In normal circumstances, sending small
	 * packets forces the peer to delay ACKs, so the calculation remains
	 * correct.  The algorithm is adaptive and, provided we follow the
	 * specs, it NEVER underestimates RTT.  BUT! If the peer plays clever
	 * tricks, such as sending "quick acks" for long enough to drive the
	 * measured RTT down to a low value and then abruptly switching to
	 * delayed ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
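		/* crtt is in usec and scaled by 8, so dividing by
		 * 8 * USEC_PER_SEC / HZ yields jiffies: with HZ=1000 the
		 * divisor is 8000, and a cached 100 ms RTT (crtt == 800000)
		 * becomes 100 jiffies.
		 */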
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298 5.7: We've failed to get a valid RTT sample from
		 * the 3WHS.  This is most likely due to retransmission,
		 * including spurious ones.  Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
	 * retransmitted.  In light of RFC6298's more aggressive 1sec
	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
	 * retransmission has occurred.
	 */
	if (tp->total_retrans > 1)
		tp->snd_cwnd = 1;
	else
		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
	tp->snd_cwnd_stamp = tcp_time_stamp;
}

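/* With paws_check, return false when the cached timestamp is fresh
 * (within TCP_PAWS_MSL) but runs ahead of the timestamp in the request
 * by more than TCP_PAWS_WINDOW, or when timestamps are not in use.
 * Without paws_check, the peer counts as proven once we have both a
 * cached RTT and a timestamp for it.
 */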
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (paws_check) {
		if (tm &&
		    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
		    ((s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW ||
		     !timestamps))
			ret = false;
		else
			ret = true;
	} else {
		if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
			ret = true;
		else
			ret = false;
	}
	rcu_read_unlock();

	return ret;
}

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_sock *tp = tcp_sk(sk);

		if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
			tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
			tp->rx_opt.ts_recent = tm->tcpm_ts;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save the last timestamp seen from this destination and hold
 * it for at least the normal timewait interval, to use for duplicate
 * segment detection in subsequent connections before they enter the
 * synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	bool ret = false;

	if (dst) {
		struct tcp_metrics_block *tm;

		rcu_read_lock();
		tm = tcp_get_metrics(sk, dst, true);
		if (tm) {
			struct tcp_sock *tp = tcp_sk(sk);

			if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
			    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
			     tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
				tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
				tm->tcpm_ts = tp->rx_opt.ts_recent;
			}
			ret = true;
		}
		rcu_read_unlock();
	}
	return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
	struct tcp_metrics_block *tm;
	bool ret = false;

	rcu_read_lock();
	tm = __tcp_get_metrics_tw(tw);
	if (tm) {
		const struct tcp_timewait_sock *tcptw;
		struct sock *sk = (struct sock *) tw;

		tcptw = tcp_twsk(sk);
		if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
		    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
		     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
			tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
			tm->tcpm_ts	   = tcptw->tw_ts_recent;
		}
		ret = true;
	}
	rcu_read_unlock();

	return ret;
}

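/* tcpm_fastopen is read locklessly and updated under this seqlock with
 * BHs disabled, so readers simply retry if they race with a writer.
 */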
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie,
			    int *syn_loss, unsigned long *last_syn_loss)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
			*syn_loss = tfom->syn_loss;
			*last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes to the message; the caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;
	if (tm->tcpm_ts_stamp) {
		if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
				(s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
			goto nla_put_failure;
		if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
				tm->tcpm_ts) < 0)
			goto nla_put_failure;
	}

	{
		int n = 0;

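		/* Metric attributes are 1-based (i + 1).  RTT and RTTVAR are
		 * stored in usec: they are emitted verbatim under the *_US
		 * attributes and rounded to msec (minimum 1) for the legacy
		 * attributes.
		 */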
		nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

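/* The dump walks the hash table row by row; cb->args[0] and cb->args[1]
 * remember the row and chain position so a multi-part dump can resume
 * where the previous skb filled up.
 */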
static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

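/* Returns 0 when an address attribute was parsed, 1 when it is absent
 * but optional, and a negative errno otherwise.
 */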
930 
931 static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
932 			   unsigned int *hash, int optional, int v4, int v6)
933 {
934 	struct nlattr *a;
935 
936 	a = info->attrs[v4];
937 	if (a) {
938 		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
939 		if (hash)
940 			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
941 		return 0;
942 	}
943 	a = info->attrs[v6];
944 	if (a) {
945 		struct in6_addr in6;
946 
947 		if (nla_len(a) != sizeof(struct in6_addr))
948 			return -EINVAL;
949 		in6 = nla_get_in6_addr(a);
950 		inetpeer_set_addr_v6(addr, &in6);
951 		if (hash)
952 			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
953 		return 0;
954 	}
955 	return optional ? 1 : -EAFNOSUPPORT;
956 }
957 
958 static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
959 			 unsigned int *hash, int optional)
960 {
961 	return __parse_nl_addr(info, addr, hash, optional,
962 			       TCP_METRICS_ATTR_ADDR_IPV4,
963 			       TCP_METRICS_ATTR_ADDR_IPV6);
964 }
965 
966 static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
967 {
968 	return __parse_nl_addr(info, addr, NULL, 0,
969 			       TCP_METRICS_ATTR_SADDR_IPV4,
970 			       TCP_METRICS_ATTR_SADDR_IPV6);
971 }
972 
973 static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
974 {
975 	struct tcp_metrics_block *tm;
976 	struct inetpeer_addr saddr, daddr;
977 	unsigned int hash;
978 	struct sk_buff *msg;
979 	struct net *net = genl_info_net(info);
980 	void *reply;
981 	int ret;
982 	bool src = true;
983 
984 	ret = parse_nl_addr(info, &daddr, &hash, 0);
985 	if (ret < 0)
986 		return ret;
987 
988 	ret = parse_nl_saddr(info, &saddr);
989 	if (ret < 0)
990 		src = false;
991 
992 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
993 	if (!msg)
994 		return -ENOMEM;
995 
996 	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
997 				  info->genlhdr->cmd);
998 	if (!reply)
999 		goto nla_put_failure;
1000 
1001 	hash ^= net_hash_mix(net);
1002 	hash = hash_32(hash, tcp_metrics_hash_log);
1003 	ret = -ESRCH;
1004 	rcu_read_lock();
1005 	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
1006 	     tm = rcu_dereference(tm->tcpm_next)) {
1007 		if (addr_same(&tm->tcpm_daddr, &daddr) &&
1008 		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
1009 		    net_eq(tm_net(tm), net)) {
1010 			ret = tcp_metrics_fill_info(msg, tm);
1011 			break;
1012 		}
1013 	}
1014 	rcu_read_unlock();
1015 	if (ret < 0)
1016 		goto out_free;
1017 
1018 	genlmsg_end(msg, reply);
1019 	return genlmsg_reply(msg, info);
1020 
1021 nla_put_failure:
1022 	ret = -EMSGSIZE;
1023 
1024 out_free:
1025 	nlmsg_free(msg);
1026 	return ret;
1027 }
1028 
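/* Remove every entry belonging to @net.  Walking with a pointer-to-pointer
 * lets us unlink entries in place under tcp_metrics_lock, while kfree_rcu()
 * defers the actual free past any concurrent RCU readers.
 */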
static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			if (net_eq(tm_net(tm), net)) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
		.policy = tcp_metrics_nl_policy,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.doit = tcp_metrics_nl_cmd_del,
		.policy = tcp_metrics_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= tcp_metrics_nl_ops,
	.n_ops		= ARRAY_SIZE(tcp_metrics_nl_ops),
};

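/* The hash size can be fixed on the kernel command line, e.g.
 * "tcpmhash_entries=4096"; it is rounded up to a power of two in
 * tcp_net_metrics_init() below.
 */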
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);

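/* The hash table is global, allocated only for init_net: sized from
 * tcpmhash_entries when given, else 16K slots on machines with at least
 * 128K pages of RAM (512 MB with 4 KB pages) and 8K slots otherwise,
 * trying kzalloc first and falling back to vzalloc.
 */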
static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!tcp_metrics_hash)
		tcp_metrics_hash = vzalloc(size);

	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
	tcp_metrics_flush_all(net);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init	=	tcp_net_metrics_init,
	.exit	=	tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}
1189