xref: /linux/net/sched/cls_cgroup.c (revision e9fb13bfec7e017130ddc5c1b5466340470f4900)
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
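
/*
 * Usage sketch (device names, handles and mount point are illustrative;
 * assumes the net_cls cgroup subsystem is mounted, e.g. at
 * /sys/fs/cgroup/net_cls):
 *
 *	mkdir /sys/fs/cgroup/net_cls/foo
 *	echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *
 * The classid is read as 0xAAAABBBB, i.e. major:minor of the tc class
 * (0x00100001 == 10:1); traffic from tasks placed in the cgroup is then
 * classified accordingly.
 */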

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
	.subsys_id	= net_cls_subsys_id,
#endif
	.module		= THIS_MODULE,
};

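/* Look up the net_cls state attached to a cgroup, or to a task's cgroup. */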
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

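/*
 * Allocate per-cgroup classifier state; a new child starts out with its
 * parent's classid, so the setting is inherited down the hierarchy by
 * default.
 */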
static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

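/* Handlers for the per-cgroup "net_cls.classid" control file. */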
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

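/*
 * Classify a packet by the net_cls classid of the sending task (or, for
 * transmits from softirq context, by the classid of the originating socket).
 */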
static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * Softirq context is detected with in_serving_softirq(), which is
	 * true only while a softirq handler is actually running; transmits
	 * from process context that merely have bh disabled (as all callers
	 * of dev_queue_xmit() do) are therefore not misclassified.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

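/*
 * There is a single filter instance per tcf_proto: the first change request
 * allocates the head and records its handle; subsequent changes must use the
 * same handle and only update the ematches and extensions.
 */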
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* don't leak the extensions validated above */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

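/* Only the single head exists, so the walk visits at most one element. */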
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

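/* Dump the filter handle, extensions and ematches back to userspace. */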
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

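/*
 * Module init: load the cgroup subsystem first so the subsys id is valid
 * before the classifier can be attached, then register the tcf ops; unwind
 * the subsystem if registration fails.
 */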
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

#ifndef CONFIG_NET_CLS_CGROUP
	/* We can't use rcu_assign_pointer because this is an int. */
	smp_wmb();
	net_cls_subsys_id = net_cls_subsys.subsys_id;
#endif

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

#ifndef CONFIG_NET_CLS_CGROUP
	net_cls_subsys_id = -1;
	synchronize_rcu();
#endif

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");