xref: /linux/net/core/net_namespace.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3 
4 #include <linux/workqueue.h>
5 #include <linux/rtnetlink.h>
6 #include <linux/cache.h>
7 #include <linux/slab.h>
8 #include <linux/list.h>
9 #include <linux/delay.h>
10 #include <linux/sched.h>
11 #include <linux/idr.h>
12 #include <linux/rculist.h>
13 #include <linux/nsproxy.h>
14 #include <linux/fs.h>
15 #include <linux/proc_ns.h>
16 #include <linux/file.h>
17 #include <linux/export.h>
18 #include <linux/user_namespace.h>
19 #include <linux/net_namespace.h>
20 #include <linux/sched/task.h>
21 #include <linux/uidgid.h>
22 #include <linux/cookie.h>
23 #include <linux/proc_fs.h>
24 
25 #include <net/sock.h>
26 #include <net/netlink.h>
27 #include <net/net_namespace.h>
28 #include <net/netns/generic.h>
29 
30 /*
31  *	Our network namespace constructor/destructor lists
32  */
33 
34 static LIST_HEAD(pernet_list);
35 static struct list_head *first_device = &pernet_list;
36 
37 LIST_HEAD(net_namespace_list);
38 EXPORT_SYMBOL_GPL(net_namespace_list);
39 
40 /* Protects net_namespace_list. Nests inside rtnl_lock() */
41 DECLARE_RWSEM(net_rwsem);
42 EXPORT_SYMBOL_GPL(net_rwsem);
43 
44 #ifdef CONFIG_KEYS
45 static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
46 #endif
47 
48 struct net init_net;
49 EXPORT_SYMBOL(init_net);
50 
51 static bool init_net_initialized;
52 /*
53  * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
54  * init_net_initialized and first_device pointer.
55  * This is an internal net namespace object. Please do not use it
56  * outside.
57  */
58 DECLARE_RWSEM(pernet_ops_rwsem);
59 EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
60 
61 #define MIN_PERNET_OPS_ID	\
62 	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
63 
64 #define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */
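/* Sizing note (illustrative, assuming a typical 64-bit build): the
 * struct net_generic header is { unsigned int len; struct rcu_head rcu; },
 * roughly 24 bytes, so MIN_PERNET_OPS_ID rounds up to 3 pointer-sized
 * slots that overlay the header and are never handed out as ids. With
 * INITIAL_NET_GEN_PTRS = 13 that leaves about 10 usable slots before
 * net_assign_generic() has to grow the array.
 */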
65 
66 static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
67 
68 DEFINE_COOKIE(net_cookie);
69 
70 static struct net_generic *net_alloc_generic(void)
71 {
72 	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
73 	unsigned int generic_size;
74 	struct net_generic *ng;
75 
76 	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);
77 
78 	ng = kzalloc(generic_size, GFP_KERNEL);
79 	if (ng)
80 		ng->s.len = gen_ptrs;
81 
82 	return ng;
83 }
84 
85 static int net_assign_generic(struct net *net, unsigned int id, void *data)
86 {
87 	struct net_generic *ng, *old_ng;
88 
89 	BUG_ON(id < MIN_PERNET_OPS_ID);
90 
91 	old_ng = rcu_dereference_protected(net->gen,
92 					   lockdep_is_held(&pernet_ops_rwsem));
93 	if (old_ng->s.len > id) {
94 		old_ng->ptr[id] = data;
95 		return 0;
96 	}
97 
98 	ng = net_alloc_generic();
99 	if (!ng)
100 		return -ENOMEM;
101 
102 	/*
103 	 * Some synchronisation notes:
104 	 *
105 	 * net_generic() walks the net->gen array inside an RCU
106 	 * read-side section. Also, once set, the net->gen->ptr[x]
107 	 * pointer never changes (see the rules in netns/generic.h).
108 	 *
109 	 * That said, we simply duplicate this array and schedule
110 	 * the old copy for kfree after a grace period.
111 	 */
112 
113 	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
114 	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
115 	ng->ptr[id] = data;
116 
117 	rcu_assign_pointer(net->gen, ng);
118 	kfree_rcu(old_ng, s.rcu);
119 	return 0;
120 }
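/* Usage sketch (the foo_net/foo_net_id names are hypothetical): readers
 * fetch their per-namespace slot with net_generic(), which dereferences
 * net->gen under RCU; because a ptr[] entry never changes once set, the
 * returned pointer stays valid for the lifetime of the namespace.
 *
 *	struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *	fn->some_counter++;
 */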
121 
122 static int ops_init(const struct pernet_operations *ops, struct net *net)
123 {
124 	struct net_generic *ng;
125 	int err = -ENOMEM;
126 	void *data = NULL;
127 
128 	if (ops->id && ops->size) {
129 		data = kzalloc(ops->size, GFP_KERNEL);
130 		if (!data)
131 			goto out;
132 
133 		err = net_assign_generic(net, *ops->id, data);
134 		if (err)
135 			goto cleanup;
136 	}
137 	err = 0;
138 	if (ops->init)
139 		err = ops->init(net);
140 	if (!err)
141 		return 0;
142 
143 	if (ops->id && ops->size) {
144 		ng = rcu_dereference_protected(net->gen,
145 					       lockdep_is_held(&pernet_ops_rwsem));
146 		ng->ptr[*ops->id] = NULL;
147 	}
148 
149 cleanup:
150 	kfree(data);
151 
152 out:
153 	return err;
154 }
155 
156 static void ops_pre_exit_list(const struct pernet_operations *ops,
157 			      struct list_head *net_exit_list)
158 {
159 	struct net *net;
160 
161 	if (ops->pre_exit) {
162 		list_for_each_entry(net, net_exit_list, exit_list)
163 			ops->pre_exit(net);
164 	}
165 }
166 
167 static void ops_exit_list(const struct pernet_operations *ops,
168 			  struct list_head *net_exit_list)
169 {
170 	struct net *net;
171 	if (ops->exit) {
172 		list_for_each_entry(net, net_exit_list, exit_list) {
173 			ops->exit(net);
174 			cond_resched();
175 		}
176 	}
177 	if (ops->exit_batch)
178 		ops->exit_batch(net_exit_list);
179 }
180 
181 static void ops_free_list(const struct pernet_operations *ops,
182 			  struct list_head *net_exit_list)
183 {
184 	struct net *net;
185 	if (ops->size && ops->id) {
186 		list_for_each_entry(net, net_exit_list, exit_list)
187 			kfree(net_generic(net, *ops->id));
188 	}
189 }
190 
191 /* should be called with nsid_lock held */
192 static int alloc_netid(struct net *net, struct net *peer, int reqid)
193 {
194 	int min = 0, max = 0;
195 
196 	if (reqid >= 0) {
197 		min = reqid;
198 		max = reqid + 1;
199 	}
200 
201 	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
202 }
203 
204 /* This function is used by idr_for_each(). If net is equal to peer, the
205  * function returns the id so that idr_for_each() stops. Because we cannot
206  * return the id 0 (idr_for_each() will not stop), we return the magic value
207  * NET_ID_ZERO (-1) for it.
208  */
209 #define NET_ID_ZERO -1
210 static int net_eq_idr(int id, void *net, void *peer)
211 {
212 	if (net_eq(net, peer))
213 		return id ? : NET_ID_ZERO;
214 	return 0;
215 }
216 
217 /* Must be called from RCU-critical section or with nsid_lock held */
218 static int __peernet2id(const struct net *net, struct net *peer)
219 {
220 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
221 
222 	/* Magic value for id 0. */
223 	if (id == NET_ID_ZERO)
224 		return 0;
225 	if (id > 0)
226 		return id;
227 
228 	return NETNSA_NSID_NOT_ASSIGNED;
229 }
230 
231 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
232 			      struct nlmsghdr *nlh, gfp_t gfp);
233 /* This function returns the id of a peer netns. If no id is assigned, one will
234  * be allocated and returned.
235  */
236 int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
237 {
238 	int id;
239 
240 	if (refcount_read(&net->ns.count) == 0)
241 		return NETNSA_NSID_NOT_ASSIGNED;
242 
243 	spin_lock_bh(&net->nsid_lock);
244 	id = __peernet2id(net, peer);
245 	if (id >= 0) {
246 		spin_unlock_bh(&net->nsid_lock);
247 		return id;
248 	}
249 
250 	/* When peer is obtained from RCU lists, we may race with
251 	 * its cleanup. Check whether it's alive, and this guarantees
252 	 * we never hash a peer back to net->netns_ids, after it has
253 	 * just been idr_remove()'d from there in cleanup_net().
254 	 */
255 	if (!maybe_get_net(peer)) {
256 		spin_unlock_bh(&net->nsid_lock);
257 		return NETNSA_NSID_NOT_ASSIGNED;
258 	}
259 
260 	id = alloc_netid(net, peer, -1);
261 	spin_unlock_bh(&net->nsid_lock);
262 
263 	put_net(peer);
264 	if (id < 0)
265 		return NETNSA_NSID_NOT_ASSIGNED;
266 
267 	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
268 
269 	return id;
270 }
271 EXPORT_SYMBOL_GPL(peernet2id_alloc);
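/* Usage sketch (hedged, not taken from this file): rtnetlink-style callers
 * use this when advertising a device whose link partner lives in another
 * namespace, roughly along the lines of:
 *
 *	int id = peernet2id_alloc(dev_net(dev), link_net, GFP_KERNEL);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 */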
272 
273 /* This function returns, if assigned, the id of a peer netns. */
274 int peernet2id(const struct net *net, struct net *peer)
275 {
276 	int id;
277 
278 	rcu_read_lock();
279 	id = __peernet2id(net, peer);
280 	rcu_read_unlock();
281 
282 	return id;
283 }
284 EXPORT_SYMBOL(peernet2id);
285 
286 /* This function returns true if the peer netns has an id assigned into the
287  * current netns.
288  */
289 bool peernet_has_id(const struct net *net, struct net *peer)
290 {
291 	return peernet2id(net, peer) >= 0;
292 }
293 
294 struct net *get_net_ns_by_id(const struct net *net, int id)
295 {
296 	struct net *peer;
297 
298 	if (id < 0)
299 		return NULL;
300 
301 	rcu_read_lock();
302 	peer = idr_find(&net->netns_ids, id);
303 	if (peer)
304 		peer = maybe_get_net(peer);
305 	rcu_read_unlock();
306 
307 	return peer;
308 }
309 EXPORT_SYMBOL_GPL(get_net_ns_by_id);
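/* Usage sketch (illustrative): callers translating a NETNSA_NSID-style
 * attribute must drop the reference themselves:
 *
 *	struct net *peer = get_net_ns_by_id(net, nsid);
 *
 *	if (!peer)
 *		return -ENOENT;
 *	...
 *	put_net(peer);
 */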
310 
311 /* init code that must occur even if setup_net() is not called. */
312 static __net_init void preinit_net(struct net *net)
313 {
314 	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt");
315 }
316 
317 /*
318  * setup_net runs the initializers for the network namespace object.
319  */
320 static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
321 {
322 	/* Must be called with pernet_ops_rwsem held */
323 	const struct pernet_operations *ops, *saved_ops;
324 	LIST_HEAD(net_exit_list);
325 	LIST_HEAD(dev_kill_list);
326 	int error = 0;
327 
328 	refcount_set(&net->ns.count, 1);
329 	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt");
330 
331 	refcount_set(&net->passive, 1);
332 	get_random_bytes(&net->hash_mix, sizeof(u32));
333 	preempt_disable();
334 	net->net_cookie = gen_cookie_next(&net_cookie);
335 	preempt_enable();
336 	net->dev_base_seq = 1;
337 	net->user_ns = user_ns;
338 	idr_init(&net->netns_ids);
339 	spin_lock_init(&net->nsid_lock);
340 	mutex_init(&net->ipv4.ra_mutex);
341 
342 	list_for_each_entry(ops, &pernet_list, list) {
343 		error = ops_init(ops, net);
344 		if (error < 0)
345 			goto out_undo;
346 	}
347 	down_write(&net_rwsem);
348 	list_add_tail_rcu(&net->list, &net_namespace_list);
349 	up_write(&net_rwsem);
350 out:
351 	return error;
352 
353 out_undo:
354 	/* Walk through the list backwards calling the exit functions
355 	 * for the pernet modules whose init functions did not fail.
356 	 */
357 	list_add(&net->exit_list, &net_exit_list);
358 	saved_ops = ops;
359 	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
360 		ops_pre_exit_list(ops, &net_exit_list);
361 
362 	synchronize_rcu();
363 
364 	ops = saved_ops;
365 	rtnl_lock();
366 	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
367 		if (ops->exit_batch_rtnl)
368 			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
369 	}
370 	unregister_netdevice_many(&dev_kill_list);
371 	rtnl_unlock();
372 
373 	ops = saved_ops;
374 	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
375 		ops_exit_list(ops, &net_exit_list);
376 
377 	ops = saved_ops;
378 	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
379 		ops_free_list(ops, &net_exit_list);
380 
381 	rcu_barrier();
382 	goto out;
383 }
384 
385 static int __net_init net_defaults_init_net(struct net *net)
386 {
387 	net->core.sysctl_somaxconn = SOMAXCONN;
388 	/* Limits per socket sk_omem_alloc usage.
389 	 * TCP zerocopy regular usage needs 128 KB.
390 	 */
391 	net->core.sysctl_optmem_max = 128 * 1024;
392 	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
393 
394 	return 0;
395 }
396 
397 static struct pernet_operations net_defaults_ops = {
398 	.init = net_defaults_init_net,
399 };
400 
401 static __init int net_defaults_init(void)
402 {
403 	if (register_pernet_subsys(&net_defaults_ops))
404 		panic("Cannot initialize net default settings");
405 
406 	return 0;
407 }
408 
409 core_initcall(net_defaults_init);
410 
411 #ifdef CONFIG_NET_NS
412 static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
413 {
414 	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
415 }
416 
417 static void dec_net_namespaces(struct ucounts *ucounts)
418 {
419 	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
420 }
421 
422 static struct kmem_cache *net_cachep __ro_after_init;
423 static struct workqueue_struct *netns_wq;
424 
425 static struct net *net_alloc(void)
426 {
427 	struct net *net = NULL;
428 	struct net_generic *ng;
429 
430 	ng = net_alloc_generic();
431 	if (!ng)
432 		goto out;
433 
434 	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
435 	if (!net)
436 		goto out_free;
437 
438 #ifdef CONFIG_KEYS
439 	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
440 	if (!net->key_domain)
441 		goto out_free_2;
442 	refcount_set(&net->key_domain->usage, 1);
443 #endif
444 
445 	rcu_assign_pointer(net->gen, ng);
446 out:
447 	return net;
448 
449 #ifdef CONFIG_KEYS
450 out_free_2:
451 	kmem_cache_free(net_cachep, net);
452 	net = NULL;
453 #endif
454 out_free:
455 	kfree(ng);
456 	goto out;
457 }
458 
459 static void net_free(struct net *net)
460 {
461 	if (refcount_dec_and_test(&net->passive)) {
462 		kfree(rcu_access_pointer(net->gen));
463 
464 		/* There should not be any trackers left there. */
465 		ref_tracker_dir_exit(&net->notrefcnt_tracker);
466 
467 		kmem_cache_free(net_cachep, net);
468 	}
469 }
470 
471 void net_drop_ns(void *p)
472 {
473 	struct net *net = (struct net *)p;
474 
475 	if (net)
476 		net_free(net);
477 }
478 
479 struct net *copy_net_ns(unsigned long flags,
480 			struct user_namespace *user_ns, struct net *old_net)
481 {
482 	struct ucounts *ucounts;
483 	struct net *net;
484 	int rv;
485 
486 	if (!(flags & CLONE_NEWNET))
487 		return get_net(old_net);
488 
489 	ucounts = inc_net_namespaces(user_ns);
490 	if (!ucounts)
491 		return ERR_PTR(-ENOSPC);
492 
493 	net = net_alloc();
494 	if (!net) {
495 		rv = -ENOMEM;
496 		goto dec_ucounts;
497 	}
498 
499 	preinit_net(net);
500 	refcount_set(&net->passive, 1);
501 	net->ucounts = ucounts;
502 	get_user_ns(user_ns);
503 
504 	rv = down_read_killable(&pernet_ops_rwsem);
505 	if (rv < 0)
506 		goto put_userns;
507 
508 	rv = setup_net(net, user_ns);
509 
510 	up_read(&pernet_ops_rwsem);
511 
512 	if (rv < 0) {
513 put_userns:
514 #ifdef CONFIG_KEYS
515 		key_remove_domain(net->key_domain);
516 #endif
517 		put_user_ns(user_ns);
518 		net_free(net);
519 dec_ucounts:
520 		dec_net_namespaces(ucounts);
521 		return ERR_PTR(rv);
522 	}
523 	return net;
524 }
525 
526 /**
527  * net_ns_get_ownership - get sysfs ownership data for @net
528  * @net: network namespace in question (can be NULL)
529  * @uid: kernel user ID for sysfs objects
530  * @gid: kernel group ID for sysfs objects
531  *
532  * Returns the uid/gid pair of root in the user namespace associated with the
533  * given network namespace.
534  */
535 void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
536 {
537 	if (net) {
538 		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
539 		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);
540 
541 		if (uid_valid(ns_root_uid))
542 			*uid = ns_root_uid;
543 
544 		if (gid_valid(ns_root_gid))
545 			*gid = ns_root_gid;
546 	} else {
547 		*uid = GLOBAL_ROOT_UID;
548 		*gid = GLOBAL_ROOT_GID;
549 	}
550 }
551 EXPORT_SYMBOL_GPL(net_ns_get_ownership);
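/* Usage sketch (the foo_* name is hypothetical): sysfs code typically wires
 * this into a device's ->get_ownership() hook so per-netns objects appear
 * owned by that namespace's root user:
 *
 *	static void foo_get_ownership(const struct device *dev,
 *				      kuid_t *uid, kgid_t *gid)
 *	{
 *		const struct net *net = ...;	(namespace owning dev)
 *
 *		net_ns_get_ownership(net, uid, gid);
 *	}
 */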
552 
553 static void unhash_nsid(struct net *net, struct net *last)
554 {
555 	struct net *tmp;
556 	/* This function is only called from cleanup_net() work,
557 	 * and this work is the only process that may delete
558 	 * a net from net_namespace_list. So, when the below
559 	 * is executing, the list may only grow. Thus, we do not
560 	 * use for_each_net_rcu() or net_rwsem.
561 	 */
562 	for_each_net(tmp) {
563 		int id;
564 
565 		spin_lock_bh(&tmp->nsid_lock);
566 		id = __peernet2id(tmp, net);
567 		if (id >= 0)
568 			idr_remove(&tmp->netns_ids, id);
569 		spin_unlock_bh(&tmp->nsid_lock);
570 		if (id >= 0)
571 			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
572 					  GFP_KERNEL);
573 		if (tmp == last)
574 			break;
575 	}
576 	spin_lock_bh(&net->nsid_lock);
577 	idr_destroy(&net->netns_ids);
578 	spin_unlock_bh(&net->nsid_lock);
579 }
580 
581 static LLIST_HEAD(cleanup_list);
582 
583 static void cleanup_net(struct work_struct *work)
584 {
585 	const struct pernet_operations *ops;
586 	struct net *net, *tmp, *last;
587 	struct llist_node *net_kill_list;
588 	LIST_HEAD(net_exit_list);
589 	LIST_HEAD(dev_kill_list);
590 
591 	/* Atomically snapshot the list of namespaces to cleanup */
592 	net_kill_list = llist_del_all(&cleanup_list);
593 
594 	down_read(&pernet_ops_rwsem);
595 
596 	/* Don't let anyone else find us. */
597 	down_write(&net_rwsem);
598 	llist_for_each_entry(net, net_kill_list, cleanup_list)
599 		list_del_rcu(&net->list);
600 	/* Cache the last net. After we unlock net_rwsem, no net newly
601 	 * added to net_namespace_list can assign an nsid to a net from
602 	 * net_kill_list (see peernet2id_alloc()), so we skip such nets
603 	 * in unhash_nsid().
604 	 *
605 	 * Note that unhash_nsid() does not delete nsid links between
606 	 * net_kill_list's nets, as they have already been deleted from
607 	 * net_namespace_list. That would be useless anyway, as their
608 	 * netns_ids are destroyed there.
609 	 */
610 	last = list_last_entry(&net_namespace_list, struct net, list);
611 	up_write(&net_rwsem);
612 
613 	llist_for_each_entry(net, net_kill_list, cleanup_list) {
614 		unhash_nsid(net, last);
615 		list_add_tail(&net->exit_list, &net_exit_list);
616 	}
617 
618 	/* Run all of the network namespace pre_exit methods */
619 	list_for_each_entry_reverse(ops, &pernet_list, list)
620 		ops_pre_exit_list(ops, &net_exit_list);
621 
622 	/*
623 	 * Another CPU might be rcu-iterating the list, wait for it.
624 	 * This needs to be before calling the exit() notifiers, so
625 	 * the rcu_barrier() below isn't sufficient alone.
626 	 * Also the pre_exit() and exit() methods need this barrier.
627 	 */
628 	synchronize_rcu_expedited();
629 
630 	rtnl_lock();
631 	list_for_each_entry_reverse(ops, &pernet_list, list) {
632 		if (ops->exit_batch_rtnl)
633 			ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
634 	}
635 	unregister_netdevice_many(&dev_kill_list);
636 	rtnl_unlock();
637 
638 	/* Run all of the network namespace exit methods */
639 	list_for_each_entry_reverse(ops, &pernet_list, list)
640 		ops_exit_list(ops, &net_exit_list);
641 
642 	/* Free the net generic variables */
643 	list_for_each_entry_reverse(ops, &pernet_list, list)
644 		ops_free_list(ops, &net_exit_list);
645 
646 	up_read(&pernet_ops_rwsem);
647 
648 	/* Ensure there are no outstanding rcu callbacks using this
649 	 * network namespace.
650 	 */
651 	rcu_barrier();
652 
653 	/* Finally it is safe to free my network namespace structure */
654 	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
655 		list_del_init(&net->exit_list);
656 		dec_net_namespaces(net->ucounts);
657 #ifdef CONFIG_KEYS
658 		key_remove_domain(net->key_domain);
659 #endif
660 		put_user_ns(net->user_ns);
661 		net_free(net);
662 	}
663 }
664 
665 /**
666  * net_ns_barrier - wait until concurrent net_cleanup_work is done
667  *
668  * cleanup_net runs from work queue and will first remove namespaces
669  * from the global list, then run net exit functions.
670  *
671  * Call this in module exit path to make sure that all netns
672  * ->exit ops have been invoked before the function is removed.
673  */
674 void net_ns_barrier(void)
675 {
676 	down_write(&pernet_ops_rwsem);
677 	up_write(&pernet_ops_rwsem);
678 }
679 EXPORT_SYMBOL(net_ns_barrier);
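/* Usage sketch (all foo_* names hypothetical): a module whose netns ->exit
 * path relies on module-global state can wait for any cleanup_net() work
 * already in flight before freeing that state:
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		foo_stop_creating_new_state();
 *		net_ns_barrier();
 *		foo_free_shared_state();
 *	}
 */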
680 
681 static DECLARE_WORK(net_cleanup_work, cleanup_net);
682 
683 void __put_net(struct net *net)
684 {
685 	ref_tracker_dir_exit(&net->refcnt_tracker);
686 	/* Cleanup the network namespace in process context */
687 	if (llist_add(&net->cleanup_list, &cleanup_list))
688 		queue_work(netns_wq, &net_cleanup_work);
689 }
690 EXPORT_SYMBOL_GPL(__put_net);
691 
692 /**
693  * get_net_ns - increment the refcount of the network namespace
694  * @ns: common namespace (net)
695  *
696  * Returns the net's common namespace.
697  */
698 struct ns_common *get_net_ns(struct ns_common *ns)
699 {
700 	return &get_net(container_of(ns, struct net, ns))->ns;
701 }
702 EXPORT_SYMBOL_GPL(get_net_ns);
703 
704 struct net *get_net_ns_by_fd(int fd)
705 {
706 	struct fd f = fdget(fd);
707 	struct net *net = ERR_PTR(-EINVAL);
708 
709 	if (!f.file)
710 		return ERR_PTR(-EBADF);
711 
712 	if (proc_ns_file(f.file)) {
713 		struct ns_common *ns = get_proc_ns(file_inode(f.file));
714 		if (ns->ops == &netns_operations)
715 			net = get_net(container_of(ns, struct net, ns));
716 	}
717 	fdput(f);
718 
719 	return net;
720 }
721 EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
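/* Usage sketch (illustrative): callers resolving a userspace-supplied
 * /proc/<pid>/ns/net file descriptor must check for an error pointer and
 * balance the reference:
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */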
722 #endif
723 
724 struct net *get_net_ns_by_pid(pid_t pid)
725 {
726 	struct task_struct *tsk;
727 	struct net *net;
728 
729 	/* Lookup the network namespace */
730 	net = ERR_PTR(-ESRCH);
731 	rcu_read_lock();
732 	tsk = find_task_by_vpid(pid);
733 	if (tsk) {
734 		struct nsproxy *nsproxy;
735 		task_lock(tsk);
736 		nsproxy = tsk->nsproxy;
737 		if (nsproxy)
738 			net = get_net(nsproxy->net_ns);
739 		task_unlock(tsk);
740 	}
741 	rcu_read_unlock();
742 	return net;
743 }
744 EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
745 
746 static __net_init int net_ns_net_init(struct net *net)
747 {
748 #ifdef CONFIG_NET_NS
749 	net->ns.ops = &netns_operations;
750 #endif
751 	return ns_alloc_inum(&net->ns);
752 }
753 
754 static __net_exit void net_ns_net_exit(struct net *net)
755 {
756 	ns_free_inum(&net->ns);
757 }
758 
759 static struct pernet_operations __net_initdata net_ns_ops = {
760 	.init = net_ns_net_init,
761 	.exit = net_ns_net_exit,
762 };
763 
764 static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
765 	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
766 	[NETNSA_NSID]		= { .type = NLA_S32 },
767 	[NETNSA_PID]		= { .type = NLA_U32 },
768 	[NETNSA_FD]		= { .type = NLA_U32 },
769 	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
770 };
771 
772 static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
773 			  struct netlink_ext_ack *extack)
774 {
775 	struct net *net = sock_net(skb->sk);
776 	struct nlattr *tb[NETNSA_MAX + 1];
777 	struct nlattr *nla;
778 	struct net *peer;
779 	int nsid, err;
780 
781 	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
782 				     NETNSA_MAX, rtnl_net_policy, extack);
783 	if (err < 0)
784 		return err;
785 	if (!tb[NETNSA_NSID]) {
786 		NL_SET_ERR_MSG(extack, "nsid is missing");
787 		return -EINVAL;
788 	}
789 	nsid = nla_get_s32(tb[NETNSA_NSID]);
790 
791 	if (tb[NETNSA_PID]) {
792 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
793 		nla = tb[NETNSA_PID];
794 	} else if (tb[NETNSA_FD]) {
795 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
796 		nla = tb[NETNSA_FD];
797 	} else {
798 		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
799 		return -EINVAL;
800 	}
801 	if (IS_ERR(peer)) {
802 		NL_SET_BAD_ATTR(extack, nla);
803 		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
804 		return PTR_ERR(peer);
805 	}
806 
807 	spin_lock_bh(&net->nsid_lock);
808 	if (__peernet2id(net, peer) >= 0) {
809 		spin_unlock_bh(&net->nsid_lock);
810 		err = -EEXIST;
811 		NL_SET_BAD_ATTR(extack, nla);
812 		NL_SET_ERR_MSG(extack,
813 			       "Peer netns already has a nsid assigned");
814 		goto out;
815 	}
816 
817 	err = alloc_netid(net, peer, nsid);
818 	spin_unlock_bh(&net->nsid_lock);
819 	if (err >= 0) {
820 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
821 				  nlh, GFP_KERNEL);
822 		err = 0;
823 	} else if (err == -ENOSPC && nsid >= 0) {
824 		err = -EEXIST;
825 		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
826 		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
827 	}
828 out:
829 	put_net(peer);
830 	return err;
831 }
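/* Note (hedged): the handler above services RTM_NEWNSID requests; in
 * practice userspace reaches it through tools such as iproute2's
 * "ip netns set NAME NETNSID", which assigns an id to a peer namespace.
 */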
832 
833 static int rtnl_net_get_size(void)
834 {
835 	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
836 	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
837 	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
838 	       ;
839 }
840 
841 struct net_fill_args {
842 	u32 portid;
843 	u32 seq;
844 	int flags;
845 	int cmd;
846 	int nsid;
847 	bool add_ref;
848 	int ref_nsid;
849 };
850 
851 static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
852 {
853 	struct nlmsghdr *nlh;
854 	struct rtgenmsg *rth;
855 
856 	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
857 			args->flags);
858 	if (!nlh)
859 		return -EMSGSIZE;
860 
861 	rth = nlmsg_data(nlh);
862 	rth->rtgen_family = AF_UNSPEC;
863 
864 	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
865 		goto nla_put_failure;
866 
867 	if (args->add_ref &&
868 	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
869 		goto nla_put_failure;
870 
871 	nlmsg_end(skb, nlh);
872 	return 0;
873 
874 nla_put_failure:
875 	nlmsg_cancel(skb, nlh);
876 	return -EMSGSIZE;
877 }
878 
879 static int rtnl_net_valid_getid_req(struct sk_buff *skb,
880 				    const struct nlmsghdr *nlh,
881 				    struct nlattr **tb,
882 				    struct netlink_ext_ack *extack)
883 {
884 	int i, err;
885 
886 	if (!netlink_strict_get_check(skb))
887 		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
888 					      tb, NETNSA_MAX, rtnl_net_policy,
889 					      extack);
890 
891 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
892 					    NETNSA_MAX, rtnl_net_policy,
893 					    extack);
894 	if (err)
895 		return err;
896 
897 	for (i = 0; i <= NETNSA_MAX; i++) {
898 		if (!tb[i])
899 			continue;
900 
901 		switch (i) {
902 		case NETNSA_PID:
903 		case NETNSA_FD:
904 		case NETNSA_NSID:
905 		case NETNSA_TARGET_NSID:
906 			break;
907 		default:
908 			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
909 			return -EINVAL;
910 		}
911 	}
912 
913 	return 0;
914 }
915 
916 static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
917 			  struct netlink_ext_ack *extack)
918 {
919 	struct net *net = sock_net(skb->sk);
920 	struct nlattr *tb[NETNSA_MAX + 1];
921 	struct net_fill_args fillargs = {
922 		.portid = NETLINK_CB(skb).portid,
923 		.seq = nlh->nlmsg_seq,
924 		.cmd = RTM_NEWNSID,
925 	};
926 	struct net *peer, *target = net;
927 	struct nlattr *nla;
928 	struct sk_buff *msg;
929 	int err;
930 
931 	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
932 	if (err < 0)
933 		return err;
934 	if (tb[NETNSA_PID]) {
935 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
936 		nla = tb[NETNSA_PID];
937 	} else if (tb[NETNSA_FD]) {
938 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
939 		nla = tb[NETNSA_FD];
940 	} else if (tb[NETNSA_NSID]) {
941 		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
942 		if (!peer)
943 			peer = ERR_PTR(-ENOENT);
944 		nla = tb[NETNSA_NSID];
945 	} else {
946 		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
947 		return -EINVAL;
948 	}
949 
950 	if (IS_ERR(peer)) {
951 		NL_SET_BAD_ATTR(extack, nla);
952 		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
953 		return PTR_ERR(peer);
954 	}
955 
956 	if (tb[NETNSA_TARGET_NSID]) {
957 		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
958 
959 		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
960 		if (IS_ERR(target)) {
961 			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
962 			NL_SET_ERR_MSG(extack,
963 				       "Target netns reference is invalid");
964 			err = PTR_ERR(target);
965 			goto out;
966 		}
967 		fillargs.add_ref = true;
968 		fillargs.ref_nsid = peernet2id(net, peer);
969 	}
970 
971 	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
972 	if (!msg) {
973 		err = -ENOMEM;
974 		goto out;
975 	}
976 
977 	fillargs.nsid = peernet2id(target, peer);
978 	err = rtnl_net_fill(msg, &fillargs);
979 	if (err < 0)
980 		goto err_out;
981 
982 	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
983 	goto out;
984 
985 err_out:
986 	nlmsg_free(msg);
987 out:
988 	if (fillargs.add_ref)
989 		put_net(target);
990 	put_net(peer);
991 	return err;
992 }
993 
994 struct rtnl_net_dump_cb {
995 	struct net *tgt_net;
996 	struct net *ref_net;
997 	struct sk_buff *skb;
998 	struct net_fill_args fillargs;
999 	int idx;
1000 	int s_idx;
1001 };
1002 
1003 /* Runs in RCU-critical section. */
1004 static int rtnl_net_dumpid_one(int id, void *peer, void *data)
1005 {
1006 	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
1007 	int ret;
1008 
1009 	if (net_cb->idx < net_cb->s_idx)
1010 		goto cont;
1011 
1012 	net_cb->fillargs.nsid = id;
1013 	if (net_cb->fillargs.add_ref)
1014 		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
1015 	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
1016 	if (ret < 0)
1017 		return ret;
1018 
1019 cont:
1020 	net_cb->idx++;
1021 	return 0;
1022 }
1023 
1024 static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
1025 				   struct rtnl_net_dump_cb *net_cb,
1026 				   struct netlink_callback *cb)
1027 {
1028 	struct netlink_ext_ack *extack = cb->extack;
1029 	struct nlattr *tb[NETNSA_MAX + 1];
1030 	int err, i;
1031 
1032 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
1033 					    NETNSA_MAX, rtnl_net_policy,
1034 					    extack);
1035 	if (err < 0)
1036 		return err;
1037 
1038 	for (i = 0; i <= NETNSA_MAX; i++) {
1039 		if (!tb[i])
1040 			continue;
1041 
1042 		if (i == NETNSA_TARGET_NSID) {
1043 			struct net *net;
1044 
1045 			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
1046 			if (IS_ERR(net)) {
1047 				NL_SET_BAD_ATTR(extack, tb[i]);
1048 				NL_SET_ERR_MSG(extack,
1049 					       "Invalid target network namespace id");
1050 				return PTR_ERR(net);
1051 			}
1052 			net_cb->fillargs.add_ref = true;
1053 			net_cb->ref_net = net_cb->tgt_net;
1054 			net_cb->tgt_net = net;
1055 		} else {
1056 			NL_SET_BAD_ATTR(extack, tb[i]);
1057 			NL_SET_ERR_MSG(extack,
1058 				       "Unsupported attribute in dump request");
1059 			return -EINVAL;
1060 		}
1061 	}
1062 
1063 	return 0;
1064 }
1065 
1066 static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
1067 {
1068 	struct rtnl_net_dump_cb net_cb = {
1069 		.tgt_net = sock_net(skb->sk),
1070 		.skb = skb,
1071 		.fillargs = {
1072 			.portid = NETLINK_CB(cb->skb).portid,
1073 			.seq = cb->nlh->nlmsg_seq,
1074 			.flags = NLM_F_MULTI,
1075 			.cmd = RTM_NEWNSID,
1076 		},
1077 		.idx = 0,
1078 		.s_idx = cb->args[0],
1079 	};
1080 	int err = 0;
1081 
1082 	if (cb->strict_check) {
1083 		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
1084 		if (err < 0)
1085 			goto end;
1086 	}
1087 
1088 	rcu_read_lock();
1089 	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
1090 	rcu_read_unlock();
1091 
1092 	cb->args[0] = net_cb.idx;
1093 end:
1094 	if (net_cb.fillargs.add_ref)
1095 		put_net(net_cb.tgt_net);
1096 	return err;
1097 }
1098 
1099 static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
1100 			      struct nlmsghdr *nlh, gfp_t gfp)
1101 {
1102 	struct net_fill_args fillargs = {
1103 		.portid = portid,
1104 		.seq = nlh ? nlh->nlmsg_seq : 0,
1105 		.cmd = cmd,
1106 		.nsid = id,
1107 	};
1108 	struct sk_buff *msg;
1109 	int err = -ENOMEM;
1110 
1111 	msg = nlmsg_new(rtnl_net_get_size(), gfp);
1112 	if (!msg)
1113 		goto out;
1114 
1115 	err = rtnl_net_fill(msg, &fillargs);
1116 	if (err < 0)
1117 		goto err_out;
1118 
1119 	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
1120 	return;
1121 
1122 err_out:
1123 	nlmsg_free(msg);
1124 out:
1125 	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
1126 }
1127 
1128 #ifdef CONFIG_NET_NS
1129 static void __init netns_ipv4_struct_check(void)
1130 {
1131 	/* TX readonly hotpath cache lines */
1132 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1133 				      sysctl_tcp_early_retrans);
1134 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1135 				      sysctl_tcp_tso_win_divisor);
1136 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1137 				      sysctl_tcp_tso_rtt_log);
1138 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1139 				      sysctl_tcp_autocorking);
1140 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1141 				      sysctl_tcp_min_snd_mss);
1142 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1143 				      sysctl_tcp_notsent_lowat);
1144 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1145 				      sysctl_tcp_limit_output_bytes);
1146 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1147 				      sysctl_tcp_min_rtt_wlen);
1148 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1149 				      sysctl_tcp_wmem);
1150 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
1151 				      sysctl_ip_fwd_use_pmtu);
1152 	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33);
1153 
1154 	/* TXRX readonly hotpath cache lines */
1155 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx,
1156 				      sysctl_tcp_moderate_rcvbuf);
1157 	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1);
1158 
1159 	/* RX readonly hotpath cache line */
1160 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1161 				      sysctl_ip_early_demux);
1162 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1163 				      sysctl_tcp_early_demux);
1164 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1165 				      sysctl_tcp_reordering);
1166 	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
1167 				      sysctl_tcp_rmem);
1168 	CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 18);
1169 }
1170 #endif
1171 
1172 void __init net_ns_init(void)
1173 {
1174 	struct net_generic *ng;
1175 
1176 #ifdef CONFIG_NET_NS
1177 	netns_ipv4_struct_check();
1178 	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
1179 					SMP_CACHE_BYTES,
1180 					SLAB_PANIC|SLAB_ACCOUNT, NULL);
1181 
1182 	/* Create workqueue for cleanup */
1183 	netns_wq = create_singlethread_workqueue("netns");
1184 	if (!netns_wq)
1185 		panic("Could not create netns workq");
1186 #endif
1187 
1188 	ng = net_alloc_generic();
1189 	if (!ng)
1190 		panic("Could not allocate generic netns");
1191 
1192 	rcu_assign_pointer(init_net.gen, ng);
1193 
1194 #ifdef CONFIG_KEYS
1195 	init_net.key_domain = &init_net_key_domain;
1196 #endif
1197 	down_write(&pernet_ops_rwsem);
1198 	preinit_net(&init_net);
1199 	if (setup_net(&init_net, &init_user_ns))
1200 		panic("Could not setup the initial network namespace");
1201 
1202 	init_net_initialized = true;
1203 	up_write(&pernet_ops_rwsem);
1204 
1205 	if (register_pernet_subsys(&net_ns_ops))
1206 		panic("Could not register network namespace subsystems");
1207 
1208 	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
1209 		      RTNL_FLAG_DOIT_UNLOCKED);
1210 	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
1211 		      RTNL_FLAG_DOIT_UNLOCKED |
1212 		      RTNL_FLAG_DUMP_UNLOCKED);
1213 }
1214 
1215 static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list)
1216 {
1217 	ops_pre_exit_list(ops, net_exit_list);
1218 	synchronize_rcu();
1219 
1220 	if (ops->exit_batch_rtnl) {
1221 		LIST_HEAD(dev_kill_list);
1222 
1223 		rtnl_lock();
1224 		ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
1225 		unregister_netdevice_many(&dev_kill_list);
1226 		rtnl_unlock();
1227 	}
1228 	ops_exit_list(ops, net_exit_list);
1229 
1230 	ops_free_list(ops, net_exit_list);
1231 }
1232 
1233 #ifdef CONFIG_NET_NS
1234 static int __register_pernet_operations(struct list_head *list,
1235 					struct pernet_operations *ops)
1236 {
1237 	struct net *net;
1238 	int error;
1239 	LIST_HEAD(net_exit_list);
1240 
1241 	list_add_tail(&ops->list, list);
1242 	if (ops->init || (ops->id && ops->size)) {
1243 		/* We hold pernet_ops_rwsem for writing, so parallel
1244 		 * setup_net() and cleanup_net() are not possible.
1245 		 */
1246 		for_each_net(net) {
1247 			error = ops_init(ops, net);
1248 			if (error)
1249 				goto out_undo;
1250 			list_add_tail(&net->exit_list, &net_exit_list);
1251 		}
1252 	}
1253 	return 0;
1254 
1255 out_undo:
1256 	/* If I have an error cleanup all namespaces I initialized */
1257 	list_del(&ops->list);
1258 	free_exit_list(ops, &net_exit_list);
1259 	return error;
1260 }
1261 
1262 static void __unregister_pernet_operations(struct pernet_operations *ops)
1263 {
1264 	struct net *net;
1265 	LIST_HEAD(net_exit_list);
1266 
1267 	list_del(&ops->list);
1268 	/* See comment in __register_pernet_operations() */
1269 	for_each_net(net)
1270 		list_add_tail(&net->exit_list, &net_exit_list);
1271 
1272 	free_exit_list(ops, &net_exit_list);
1273 }
1274 
1275 #else
1276 
1277 static int __register_pernet_operations(struct list_head *list,
1278 					struct pernet_operations *ops)
1279 {
1280 	if (!init_net_initialized) {
1281 		list_add_tail(&ops->list, list);
1282 		return 0;
1283 	}
1284 
1285 	return ops_init(ops, &init_net);
1286 }
1287 
1288 static void __unregister_pernet_operations(struct pernet_operations *ops)
1289 {
1290 	if (!init_net_initialized) {
1291 		list_del(&ops->list);
1292 	} else {
1293 		LIST_HEAD(net_exit_list);
1294 		list_add(&init_net.exit_list, &net_exit_list);
1295 		free_exit_list(ops, &net_exit_list);
1296 	}
1297 }
1298 
1299 #endif /* CONFIG_NET_NS */
1300 
1301 static DEFINE_IDA(net_generic_ids);
1302 
1303 static int register_pernet_operations(struct list_head *list,
1304 				      struct pernet_operations *ops)
1305 {
1306 	int error;
1307 
1308 	if (ops->id) {
1309 		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
1310 				GFP_KERNEL);
1311 		if (error < 0)
1312 			return error;
1313 		*ops->id = error;
1314 		/* Reading max_gen_ptrs here does not require READ_ONCE() as
1315 		 * writers already hold pernet_ops_rwsem. But WRITE_ONCE() is
1316 		 * needed to pair with the lockless READ_ONCE() in net_alloc_generic().
1317 		 */
1318 		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
1319 	}
1320 	error = __register_pernet_operations(list, ops);
1321 	if (error) {
1322 		rcu_barrier();
1323 		if (ops->id)
1324 			ida_free(&net_generic_ids, *ops->id);
1325 	}
1326 
1327 	return error;
1328 }
1329 
1330 static void unregister_pernet_operations(struct pernet_operations *ops)
1331 {
1332 	__unregister_pernet_operations(ops);
1333 	rcu_barrier();
1334 	if (ops->id)
1335 		ida_free(&net_generic_ids, *ops->id);
1336 }
1337 
1338 /**
1339  *      register_pernet_subsys - register a network namespace subsystem
1340  *	@ops:  pernet operations structure for the subsystem
1341  *
1342  *	Register a subsystem which has init and exit functions
1343  *	that are called when network namespaces are created and
1344  *	destroyed respectively.
1345  *
1346  *	When registered, all network namespace init functions are
1347  *	called for every existing network namespace, allowing kernel
1348  *	modules to have a race-free view of the set of network namespaces.
1349  *
1350  *	When a new network namespace is created all of the init
1351  *	methods are called in the order in which they were registered.
1352  *
1353  *	When a network namespace is destroyed all of the exit methods
1354  *	are called in the reverse of the order with which they were
1355  *	registered.
1356  */
1357 int register_pernet_subsys(struct pernet_operations *ops)
1358 {
1359 	int error;
1360 	down_write(&pernet_ops_rwsem);
1361 	error =  register_pernet_operations(first_device, ops);
1362 	up_write(&pernet_ops_rwsem);
1363 	return error;
1364 }
1365 EXPORT_SYMBOL_GPL(register_pernet_subsys);
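/* Usage sketch (all foo_* names hypothetical): a subsystem typically pairs
 * register_pernet_subsys() with an ops->id/ops->size slot so every namespace
 * gets its own zeroed state, reachable later through net_generic():
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_net {
 *		u32 sequence;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->sequence = 1;
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit(struct net *net)
 *	{
 *		(release any per-namespace resources here)
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * The module init path would then call register_pernet_subsys(&foo_net_ops)
 * and the exit path unregister_pernet_subsys(&foo_net_ops).
 */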
1366 
1367 /**
1368  *      unregister_pernet_subsys - unregister a network namespace subsystem
1369  *	@ops: pernet operations structure to manipulate
1370  *
1371  *	Remove the pernet operations structure from the list to be
1372  *	used when network namespaces are created or destroyed.  In
1373  *	addition run the exit method for all existing network
1374  *	namespaces.
1375  */
1376 void unregister_pernet_subsys(struct pernet_operations *ops)
1377 {
1378 	down_write(&pernet_ops_rwsem);
1379 	unregister_pernet_operations(ops);
1380 	up_write(&pernet_ops_rwsem);
1381 }
1382 EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
1383 
1384 /**
1385  *      register_pernet_device - register a network namespace device
1386  *	@ops:  pernet operations structure for the subsystem
1387  *
1388  *	Register a device which has init and exit functions
1389  *	that are called when network namespaces are created and
1390  *	destroyed respectively.
1391  *
1392  *	When registered, all network namespace init functions are
1393  *	called for every existing network namespace, allowing kernel
1394  *	modules to have a race-free view of the set of network namespaces.
1395  *
1396  *	When a new network namespace is created all of the init
1397  *	methods are called in the order in which they were registered.
1398  *
1399  *	When a network namespace is destroyed all of the exit methods
1400  *	are called in the reverse of the order with which they were
1401  *	registered.
1402  */
1403 int register_pernet_device(struct pernet_operations *ops)
1404 {
1405 	int error;
1406 	down_write(&pernet_ops_rwsem);
1407 	error = register_pernet_operations(&pernet_list, ops);
1408 	if (!error && (first_device == &pernet_list))
1409 		first_device = &ops->list;
1410 	up_write(&pernet_ops_rwsem);
1411 	return error;
1412 }
1413 EXPORT_SYMBOL_GPL(register_pernet_device);
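/* Note: because register_pernet_subsys() inserts new entries before
 * first_device while this function appends after it, pernet_list keeps all
 * subsystems ahead of all devices. setup_net() therefore initializes
 * subsystems before devices, and cleanup_net(), walking the list in
 * reverse, tears devices down first.
 */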
1414 
1415 /**
1416  *      unregister_pernet_device - unregister a network namespace netdevice
1417  *	@ops: pernet operations structure to manipulate
1418  *
1419  *	Remove the pernet operations structure from the list to be
1420  *	used when network namespaces are created or destroyed.  In
1421  *	addition run the exit method for all existing network
1422  *	namespaces.
1423  */
1424 void unregister_pernet_device(struct pernet_operations *ops)
1425 {
1426 	down_write(&pernet_ops_rwsem);
1427 	if (&ops->list == first_device)
1428 		first_device = first_device->next;
1429 	unregister_pernet_operations(ops);
1430 	up_write(&pernet_ops_rwsem);
1431 }
1432 EXPORT_SYMBOL_GPL(unregister_pernet_device);
1433 
1434 #ifdef CONFIG_NET_NS
1435 static struct ns_common *netns_get(struct task_struct *task)
1436 {
1437 	struct net *net = NULL;
1438 	struct nsproxy *nsproxy;
1439 
1440 	task_lock(task);
1441 	nsproxy = task->nsproxy;
1442 	if (nsproxy)
1443 		net = get_net(nsproxy->net_ns);
1444 	task_unlock(task);
1445 
1446 	return net ? &net->ns : NULL;
1447 }
1448 
1449 static inline struct net *to_net_ns(struct ns_common *ns)
1450 {
1451 	return container_of(ns, struct net, ns);
1452 }
1453 
1454 static void netns_put(struct ns_common *ns)
1455 {
1456 	put_net(to_net_ns(ns));
1457 }
1458 
1459 static int netns_install(struct nsset *nsset, struct ns_common *ns)
1460 {
1461 	struct nsproxy *nsproxy = nsset->nsproxy;
1462 	struct net *net = to_net_ns(ns);
1463 
1464 	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1465 	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
1466 		return -EPERM;
1467 
1468 	put_net(nsproxy->net_ns);
1469 	nsproxy->net_ns = get_net(net);
1470 	return 0;
1471 }
1472 
1473 static struct user_namespace *netns_owner(struct ns_common *ns)
1474 {
1475 	return to_net_ns(ns)->user_ns;
1476 }
1477 
1478 const struct proc_ns_operations netns_operations = {
1479 	.name		= "net",
1480 	.type		= CLONE_NEWNET,
1481 	.get		= netns_get,
1482 	.put		= netns_put,
1483 	.install	= netns_install,
1484 	.owner		= netns_owner,
1485 };
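/* Context note (illustrative): this table backs the /proc/<pid>/ns/net
 * magic link, so a userspace sequence roughly like
 *
 *	int fd = open("/proc/self/ns/net", O_RDONLY);
 *
 *	setns(fd, CLONE_NEWNET);
 *
 * ends up in netns_get()/netns_install() above, with netns_install()
 * requiring CAP_SYS_ADMIN over both the target namespace's owning user
 * namespace and the caller's.
 */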
1486 #endif
1487