// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fifo.c	The simplest FIFO queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* 1 band FIFO pseudo-"scheduler" */

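/* bfifo: enqueue as long as the queued byte count (backlog) plus the new
 * packet's length stays within sch->limit; otherwise drop the packet.
 */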
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}

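/* pfifo: enqueue as long as the queue length in packets is below sch->limit;
 * otherwise drop the packet.
 */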
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}

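/* pfifo_head_drop: when the queue is full, drop the oldest packet at the
 * head to make room for the new one, account the drop towards this qdisc
 * and its ancestors, and report congestion (NET_XMIT_CN) to the caller.
 */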
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}

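/* Offload hooks: mirror replace/destroy/stats requests to hardware via
 * ndo_setup_tc(TC_SETUP_QDISC_FIFO) when the device supports tc offload.
 */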
static void fifo_offload_init(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_fifo_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_FIFO_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static void fifo_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_fifo_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_FIFO_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
}

static int fifo_offload_dump(struct Qdisc *sch)
{
	struct tc_fifo_qopt_offload qopt;

	qopt.command = TC_FIFO_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
}

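/* Common init/change handler: take the limit from the TCA_OPTIONS attribute
 * if one was supplied, otherwise default to the device tx_queue_len (scaled
 * by the interface MTU for bfifo, which limits bytes rather than packets).
 * TCQ_F_CAN_BYPASS is set when the configured limit can hold at least one
 * full packet.
 */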
static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	bool bypass;
	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

	if (opt == NULL) {
		u32 limit = qdisc_dev(sch)->tx_queue_len;

		if (is_bfifo)
			limit *= psched_mtu(qdisc_dev(sch));

		sch->limit = limit;
	} else {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		sch->limit = ctl->limit;
	}

	if (is_bfifo)
		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
	else
		bypass = sch->limit >= 1;

	if (bypass)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

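/* pfifo/bfifo additionally set up hardware offload on init; the head-drop
 * variant uses fifo_hd_init(), which has no offload path.
 */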
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err;

	err = __fifo_init(sch, opt, extack);
	if (err)
		return err;

	fifo_offload_init(sch);
	return 0;
}

static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}

static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}

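/* Dump the configured limit as a struct tc_fifo_qopt in TCA_OPTIONS; the
 * offload-capable variants first refresh their stats from the hardware.
 */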
static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_fifo_qopt opt = { .limit = sch->limit };

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err;

	err = fifo_offload_dump(sch);
	if (err)
		return err;

	return __fifo_dump(sch, skb);
}

static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}

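/* The three qdisc variants exposed to userspace: "pfifo" (packet-count
 * limit), "bfifo" (byte limit) and "pfifo_head_drop" (packet-count limit,
 * drops from the head when full).  They are configured with tc, e.g.
 * (interface name purely illustrative):
 *
 *	tc qdisc add dev eth0 root pfifo limit 100
 *	tc qdisc add dev eth0 root bfifo limit 10000
 */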
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);

struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);

struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};

/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	if (!q->ops->change)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla, NULL);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);

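/* Create a default FIFO child qdisc with the given limit, for qdiscs that
 * embed a FIFO (e.g. TBF).  Returns the new qdisc or an ERR_PTR on failure.
 */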
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
			      extack);
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_put(q);
			q = NULL;
		}
	}

	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
MODULE_DESCRIPTION("Single queue packet and byte based First In First Out (P/BFIFO) scheduler");