xref: /linux/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c (revision 3bdab16c55f57a24245c97d707241dd9b48d1a91)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"

/* RX/TX Fixup */
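/* rmnet_vnd_rx_fixup() and rmnet_vnd_tx_fixup() update the per-CPU packet and
 * byte counters of a virtual device; they are called from the rmnet ingress
 * and egress paths (see rmnet_handlers.c). The counters are summed back up
 * under the u64_stats sequence counter in rmnet_get_stats64() below.
 */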

void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */

static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}

static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}

static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

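		/* tx_drops is bumped with this_cpu_inc() and is not covered by
		 * syncp, so it is read outside of the fetch/retry loop.
		 */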
		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};
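/* These strings are served to userspace for ethtool statistics and must stay
 * in the same order as the u64 fields of struct rmnet_priv_stats, since
 * rmnet_get_ethtool_stats() below copies that structure out verbatim.
 */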

static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &rmnet_gstrings_stats,
		       sizeof(rmnet_gstrings_stats));
		break;
	}
}

static int rmnet_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(rmnet_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_priv_stats *st = &priv->stats;

	if (!data)
		return;

	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}

static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};
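/* Example (the device name is illustrative, not mandated by the driver): the
 * checksum counters above can be dumped from userspace with "ethtool -S rmnet0".
 */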

/* Called by the kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc.
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	eth_random_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;

	/* This perm addr will be used as interface identifier by IPv6 */
	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(rmnet_dev->perm_addr);
}
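
/* Illustrative usage (device names are examples only): a virtual rmnet device
 * is typically created from userspace through the rtnl link ops, e.g. with
 * iproute2 on top of a QMAP-capable real device:
 *
 *   ip link add link wwan0 name rmnet0 type rmnet mux_id 1
 *
 * which ends up invoking rmnet_vnd_setup() above and rmnet_vnd_newlink() below.
 */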

/* Exposed API */

int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	if (rmnet_get_endpoint(port, id))
		return -EBUSY;

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	priv->real_dev = real_dev;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv->mux_id = id;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);
	return priv->mux_id;
}
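
/* Flow control: MAP flow control commands received on the real device are
 * parsed on the ingress side (see rmnet_map_command.c) and end up toggling
 * the VND TX queue through rmnet_vnd_do_flow_control() below.
 */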

int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect a similar number of enable and disable
	 * commands, optimize for the disable path, which is more
	 * latency-sensitive than enable.
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}
291