xref: /linux/net/can/bcm.c (revision 72503791edffe516848d0f01d377fa9cd0711970)
1 /*
2  * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3  *
4  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of Volkswagen nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * Alternatively, provided that this notice is retained in full, this
20  * software may be distributed under the terms of the GNU General
21  * Public License ("GPL") version 2, in which case the provisions of the
22  * GPL apply INSTEAD OF those given above.
23  *
24  * The provided data structures and external interfaces from this code
25  * are not restricted to be used by modules with a GPL compatible license.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38  * DAMAGE.
39  *
40  */
41 
42 #include <linux/module.h>
43 #include <linux/init.h>
44 #include <linux/interrupt.h>
45 #include <linux/hrtimer.h>
46 #include <linux/list.h>
47 #include <linux/proc_fs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/bcm.h>
58 #include <linux/slab.h>
59 #include <net/sock.h>
60 #include <net/net_namespace.h>
61 
62 /*
63  * To send multiple CAN frames within TX_SETUP or to filter CAN
64  * messages with a multiplex index within RX_SETUP, the number of
65  * different frames/filters is limited to 256 due to the one byte index value.
66  */
67 #define MAX_NFRAMES 256
68 
69 /* use of last_frames[index].can_dlc */
70 #define RX_RECV    0x40 /* received data for this element */
71 #define RX_THR     0x80 /* element has not been sent due to the throttle feature */
72 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
73 
74 /* get best masking value for can_rx_register() for a given single can_id */
75 #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
76 		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
77 		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
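/*
 * e.g. (illustrative): for an SFF id like 0x123 this yields
 * CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG, so frames carrying the
 * same SFF id but with the EFF or RTR bit set do not match this bcm_op.
 */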
78 
79 #define CAN_BCM_VERSION CAN_VERSION
80 static __initconst const char banner[] = KERN_INFO
81 	"can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
82 
83 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
84 MODULE_LICENSE("Dual BSD/GPL");
85 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
86 MODULE_ALIAS("can-proto-2");
87 
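/*
 * Userspace usage sketch (illustrative only, not a normative example):
 * a BCM user creates a CAN_BCM socket, connect()s it to a CAN interface
 * and then writes a struct bcm_msg_head, optionally followed by nframes
 * struct can_frame, e.g. for a cyclic transmission:
 *
 *	struct {
 *		struct bcm_msg_head msg_head;
 *		struct can_frame frame;
 *	} tx_msg;
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	tx_msg.msg_head.opcode  = TX_SETUP;
 *	tx_msg.msg_head.flags   = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *	tx_msg.msg_head.nframes = 1;
 *	...
 *	write(s, &tx_msg, sizeof(tx_msg));
 */
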
88 /* easy access to can_frame payload */
89 static inline u64 GET_U64(const struct can_frame *cp)
90 {
91 	return *(u64 *)cp->data;
92 }
93 
94 struct bcm_op {
95 	struct list_head list;
96 	int ifindex;
97 	canid_t can_id;
98 	u32 flags;
99 	unsigned long frames_abs, frames_filtered;
100 	struct timeval ival1, ival2;
101 	struct hrtimer timer, thrtimer;
102 	struct tasklet_struct tsklet, thrtsklet;
103 	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
104 	int rx_ifindex;
105 	u32 count;
106 	u32 nframes;
107 	u32 currframe;
108 	struct can_frame *frames;
109 	struct can_frame *last_frames;
110 	struct can_frame sframe;
111 	struct can_frame last_sframe;
112 	struct sock *sk;
113 	struct net_device *rx_reg_dev;
114 };
115 
116 static struct proc_dir_entry *proc_dir;
117 
118 struct bcm_sock {
119 	struct sock sk;
120 	int bound;
121 	int ifindex;
122 	struct notifier_block notifier;
123 	struct list_head rx_ops;
124 	struct list_head tx_ops;
125 	unsigned long dropped_usr_msgs;
126 	struct proc_dir_entry *bcm_proc_read;
127 	char procname [32]; /* inode number in decimal with \0 */
128 };
129 
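/*
 * struct bcm_sock is allocated as the protocol specific part of struct
 * sock (see bcm_proto.obj_size below), so a plain cast is sufficient here.
 */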
130 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
131 {
132 	return (struct bcm_sock *)sk;
133 }
134 
135 #define CFSIZ sizeof(struct can_frame)
136 #define OPSIZ sizeof(struct bcm_op)
137 #define MHSIZ sizeof(struct bcm_msg_head)
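
/*
 * Every BCM message exchanged with userspace consists of one struct
 * bcm_msg_head, optionally followed by msg_head.nframes can_frames -
 * hence the "size < MHSIZ || (size - MHSIZ) % CFSIZ" check in bcm_sendmsg().
 */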
138 
139 /*
140  * procfs functions
141  */
142 static char *bcm_proc_getifname(char *result, int ifindex)
143 {
144 	struct net_device *dev;
145 
146 	if (!ifindex)
147 		return "any";
148 
149 	rcu_read_lock();
150 	dev = dev_get_by_index_rcu(&init_net, ifindex);
151 	if (dev)
152 		strcpy(result, dev->name);
153 	else
154 		strcpy(result, "???");
155 	rcu_read_unlock();
156 
157 	return result;
158 }
159 
160 static int bcm_proc_show(struct seq_file *m, void *v)
161 {
162 	char ifname[IFNAMSIZ];
163 	struct sock *sk = (struct sock *)m->private;
164 	struct bcm_sock *bo = bcm_sk(sk);
165 	struct bcm_op *op;
166 
167 	seq_printf(m, ">>> socket %pK", sk->sk_socket);
168 	seq_printf(m, " / sk %pK", sk);
169 	seq_printf(m, " / bo %pK", bo);
170 	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
171 	seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
172 	seq_printf(m, " <<<\n");
173 
174 	list_for_each_entry(op, &bo->rx_ops, list) {
175 
176 		unsigned long reduction;
177 
178 		/* print only active entries & prevent division by zero */
179 		if (!op->frames_abs)
180 			continue;
181 
182 		seq_printf(m, "rx_op: %03X %-5s ",
183 				op->can_id, bcm_proc_getifname(ifname, op->ifindex));
184 		seq_printf(m, "[%u]%c ", op->nframes,
185 				(op->flags & RX_CHECK_DLC)?'d':' ');
186 		if (op->kt_ival1.tv64)
187 			seq_printf(m, "timeo=%lld ",
188 					(long long)
189 					ktime_to_us(op->kt_ival1));
190 
191 		if (op->kt_ival2.tv64)
192 			seq_printf(m, "thr=%lld ",
193 					(long long)
194 					ktime_to_us(op->kt_ival2));
195 
196 		seq_printf(m, "# recv %ld (%ld) => reduction: ",
197 				op->frames_filtered, op->frames_abs);
198 
199 		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
200 
201 		seq_printf(m, "%s%ld%%\n",
202 				(reduction == 100)?"near ":"", reduction);
203 	}
204 
205 	list_for_each_entry(op, &bo->tx_ops, list) {
206 
207 		seq_printf(m, "tx_op: %03X %s [%u] ",
208 				op->can_id,
209 				bcm_proc_getifname(ifname, op->ifindex),
210 				op->nframes);
211 
212 		if (op->kt_ival1.tv64)
213 			seq_printf(m, "t1=%lld ",
214 					(long long) ktime_to_us(op->kt_ival1));
215 
216 		if (op->kt_ival2.tv64)
217 			seq_printf(m, "t2=%lld ",
218 					(long long) ktime_to_us(op->kt_ival2));
219 
220 		seq_printf(m, "# sent %ld\n", op->frames_abs);
221 	}
222 	seq_putc(m, '\n');
223 	return 0;
224 }
225 
226 static int bcm_proc_open(struct inode *inode, struct file *file)
227 {
228 	return single_open(file, bcm_proc_show, PDE(inode)->data);
229 }
230 
231 static const struct file_operations bcm_proc_fops = {
232 	.owner		= THIS_MODULE,
233 	.open		= bcm_proc_open,
234 	.read		= seq_read,
235 	.llseek		= seq_lseek,
236 	.release	= single_release,
237 };
238 
239 /*
240  * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
241  *              of the given bcm tx op
242  */
243 static void bcm_can_tx(struct bcm_op *op)
244 {
245 	struct sk_buff *skb;
246 	struct net_device *dev;
247 	struct can_frame *cf = &op->frames[op->currframe];
248 
249 	/* no target device? => exit */
250 	if (!op->ifindex)
251 		return;
252 
253 	dev = dev_get_by_index(&init_net, op->ifindex);
254 	if (!dev) {
255 		/* RFC: should this bcm_op remove itself here? */
256 		return;
257 	}
258 
259 	skb = alloc_skb(CFSIZ, gfp_any());
260 	if (!skb)
261 		goto out;
262 
263 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
264 
265 	/* send with loopback */
266 	skb->dev = dev;
267 	skb->sk = op->sk;
268 	can_send(skb, 1);
269 
270 	/* update statistics */
271 	op->currframe++;
272 	op->frames_abs++;
273 
274 	/* reached last frame? */
275 	if (op->currframe >= op->nframes)
276 		op->currframe = 0;
277  out:
278 	dev_put(dev);
279 }
280 
281 /*
282  * bcm_send_to_user - send a BCM message to the userspace
283  *                    (consisting of bcm_msg_head + x CAN frames)
284  */
285 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
286 			     struct can_frame *frames, int has_timestamp)
287 {
288 	struct sk_buff *skb;
289 	struct can_frame *firstframe;
290 	struct sockaddr_can *addr;
291 	struct sock *sk = op->sk;
292 	unsigned int datalen = head->nframes * CFSIZ;
293 	int err;
294 
295 	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
296 	if (!skb)
297 		return;
298 
299 	memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
300 
301 	if (head->nframes) {
302 		/* can_frames starting here */
303 		firstframe = (struct can_frame *)skb_tail_pointer(skb);
304 
305 		memcpy(skb_put(skb, datalen), frames, datalen);
306 
307 		/*
308 		 * the BCM uses the can_dlc-element of the can_frame
309 		 * structure for internal purposes. This is only
310 		 * relevant for updates that are generated by the
311 		 * BCM, where nframes is 1
312 		 */
313 		if (head->nframes == 1)
314 			firstframe->can_dlc &= BCM_CAN_DLC_MASK;
315 	}
316 
317 	if (has_timestamp) {
318 		/* restore rx timestamp */
319 		skb->tstamp = op->rx_stamp;
320 	}
321 
322 	/*
323 	 *  Put the datagram into the queue so that bcm_recvmsg() can
324 	 *  get it from there.  We need to pass the interface index to
325 	 *  bcm_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
326 	 *  containing the interface index.
327 	 */
328 
329 	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
330 	addr = (struct sockaddr_can *)skb->cb;
331 	memset(addr, 0, sizeof(*addr));
332 	addr->can_family  = AF_CAN;
333 	addr->can_ifindex = op->rx_ifindex;
334 
335 	err = sock_queue_rcv_skb(sk, skb);
336 	if (err < 0) {
337 		struct bcm_sock *bo = bcm_sk(sk);
338 
339 		kfree_skb(skb);
340 		/* don't care about overflows in this statistic */
341 		bo->dropped_usr_msgs++;
342 	}
343 }
344 
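/*
 * bcm_tx_start_timer - (re)start the cyclic tx timer: ival1 is used while
 *                      the frame counter is still running, ival2 afterwards
 */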
345 static void bcm_tx_start_timer(struct bcm_op *op)
346 {
347 	if (op->kt_ival1.tv64 && op->count)
348 		hrtimer_start(&op->timer,
349 			      ktime_add(ktime_get(), op->kt_ival1),
350 			      HRTIMER_MODE_ABS);
351 	else if (op->kt_ival2.tv64)
352 		hrtimer_start(&op->timer,
353 			      ktime_add(ktime_get(), op->kt_ival2),
354 			      HRTIMER_MODE_ABS);
355 }
356 
357 static void bcm_tx_timeout_tsklet(unsigned long data)
358 {
359 	struct bcm_op *op = (struct bcm_op *)data;
360 	struct bcm_msg_head msg_head;
361 
362 	if (op->kt_ival1.tv64 && (op->count > 0)) {
363 
364 		op->count--;
365 		if (!op->count && (op->flags & TX_COUNTEVT)) {
366 
367 			/* create notification to user */
368 			msg_head.opcode  = TX_EXPIRED;
369 			msg_head.flags   = op->flags;
370 			msg_head.count   = op->count;
371 			msg_head.ival1   = op->ival1;
372 			msg_head.ival2   = op->ival2;
373 			msg_head.can_id  = op->can_id;
374 			msg_head.nframes = 0;
375 
376 			bcm_send_to_user(op, &msg_head, NULL, 0);
377 		}
378 		bcm_can_tx(op);
379 
380 	} else if (op->kt_ival2.tv64)
381 		bcm_can_tx(op);
382 
383 	bcm_tx_start_timer(op);
384 }
385 
386 /*
387  * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
388  */
389 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
390 {
391 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
392 
393 	tasklet_schedule(&op->tsklet);
394 
395 	return HRTIMER_NORESTART;
396 }
397 
398 /*
399  * bcm_rx_changed - create a RX_CHANGED notification due to changed content
400  */
401 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
402 {
403 	struct bcm_msg_head head;
404 
405 	/* update statistics */
406 	op->frames_filtered++;
407 
408 	/* prevent statistics overflow */
409 	if (op->frames_filtered > ULONG_MAX/100)
410 		op->frames_filtered = op->frames_abs = 0;
411 
412 	/* this element is not throttled anymore */
413 	data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
414 
415 	head.opcode  = RX_CHANGED;
416 	head.flags   = op->flags;
417 	head.count   = op->count;
418 	head.ival1   = op->ival1;
419 	head.ival2   = op->ival2;
420 	head.can_id  = op->can_id;
421 	head.nframes = 1;
422 
423 	bcm_send_to_user(op, &head, data, 1);
424 }
425 
426 /*
427  * bcm_rx_update_and_send - process a detected relevant receive content change
428  *                          1. update the last received data
429  *                          2. send a notification to the user (if possible)
430  */
431 static void bcm_rx_update_and_send(struct bcm_op *op,
432 				   struct can_frame *lastdata,
433 				   const struct can_frame *rxdata)
434 {
435 	memcpy(lastdata, rxdata, CFSIZ);
436 
437 	/* mark as used and throttled by default */
438 	lastdata->can_dlc |= (RX_RECV|RX_THR);
439 
440 	/* throttling mode inactive? */
441 	if (!op->kt_ival2.tv64) {
442 		/* send RX_CHANGED to the user immediately */
443 		bcm_rx_changed(op, lastdata);
444 		return;
445 	}
446 
447 	/* with active throttling timer we are just done here */
448 	if (hrtimer_active(&op->thrtimer))
449 		return;
450 
451 	/* first reception with throttling mode enabled */
452 	if (!op->kt_lastmsg.tv64)
453 		goto rx_changed_settime;
454 
455 	/* got a second frame inside a potential throttle period? */
456 	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
457 	    ktime_to_us(op->kt_ival2)) {
458 		/* do not send the saved data - only start throttle timer */
459 		hrtimer_start(&op->thrtimer,
460 			      ktime_add(op->kt_lastmsg, op->kt_ival2),
461 			      HRTIMER_MODE_ABS);
462 		return;
463 	}
464 
465 	/* the gap was big enough that throttling was not needed here */
466 rx_changed_settime:
467 	bcm_rx_changed(op, lastdata);
468 	op->kt_lastmsg = ktime_get();
469 }
470 
471 /*
472  * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
473  *                       received data stored in op->last_frames[]
474  */
475 static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
476 				const struct can_frame *rxdata)
477 {
478 	/*
479 	 * nobody uses the MSBs of can_dlc for comparison,
480 	 * so we use them here to detect the first reception
481 	 */
482 
483 	if (!(op->last_frames[index].can_dlc & RX_RECV)) {
484 		/* received data for the first time => send update to user */
485 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
486 		return;
487 	}
488 
489 	/* do a real check in can_frame data section */
490 
491 	if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
492 	    (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
493 		bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
494 		return;
495 	}
496 
497 	if (op->flags & RX_CHECK_DLC) {
498 		/* do a real check in can_frame dlc */
499 		if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
500 					BCM_CAN_DLC_MASK)) {
501 			bcm_rx_update_and_send(op, &op->last_frames[index],
502 					       rxdata);
503 			return;
504 		}
505 	}
506 }
507 
508 /*
509  * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
510  */
511 static void bcm_rx_starttimer(struct bcm_op *op)
512 {
513 	if (op->flags & RX_NO_AUTOTIMER)
514 		return;
515 
516 	if (op->kt_ival1.tv64)
517 		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
518 }
519 
520 static void bcm_rx_timeout_tsklet(unsigned long data)
521 {
522 	struct bcm_op *op = (struct bcm_op *)data;
523 	struct bcm_msg_head msg_head;
524 
525 	/* create notification to user */
526 	msg_head.opcode  = RX_TIMEOUT;
527 	msg_head.flags   = op->flags;
528 	msg_head.count   = op->count;
529 	msg_head.ival1   = op->ival1;
530 	msg_head.ival2   = op->ival2;
531 	msg_head.can_id  = op->can_id;
532 	msg_head.nframes = 0;
533 
534 	bcm_send_to_user(op, &msg_head, NULL, 0);
535 }
536 
537 /*
538  * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
539  */
540 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
541 {
542 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
543 
544 	/* schedule before NET_RX_SOFTIRQ */
545 	tasklet_hi_schedule(&op->tsklet);
546 
547 	/* no restart of the timer is done here! */
548 
549 	/* if the user wants to be informed when cyclic CAN messages come back */
550 	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
551 		/* clear received can_frames to indicate 'nothing received' */
552 		memset(op->last_frames, 0, op->nframes * CFSIZ);
553 	}
554 
555 	return HRTIMER_NORESTART;
556 }
557 
558 /*
559  * bcm_rx_do_flush - helper for bcm_rx_thr_flush
560  */
561 static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
562 				  unsigned int index)
563 {
564 	if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
565 		if (update)
566 			bcm_rx_changed(op, &op->last_frames[index]);
567 		return 1;
568 	}
569 	return 0;
570 }
571 
572 /*
573  * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
574  *
575  * update == 0 : just check if throttled data is available  (any irq context)
576  * update == 1 : check and send throttled data to userspace (soft_irq context)
577  */
578 static int bcm_rx_thr_flush(struct bcm_op *op, int update)
579 {
580 	int updated = 0;
581 
582 	if (op->nframes > 1) {
583 		unsigned int i;
584 
585 		/* for MUX filter we start at index 1 */
586 		for (i = 1; i < op->nframes; i++)
587 			updated += bcm_rx_do_flush(op, update, i);
588 
589 	} else {
590 		/* for RX_FILTER_ID and simple filter */
591 		updated += bcm_rx_do_flush(op, update, 0);
592 	}
593 
594 	return updated;
595 }
596 
597 static void bcm_rx_thr_tsklet(unsigned long data)
598 {
599 	struct bcm_op *op = (struct bcm_op *)data;
600 
601 	/* push the changed data to the userspace */
602 	bcm_rx_thr_flush(op, 1);
603 }
604 
605 /*
606  * bcm_rx_thr_handler - the time for blocked content updates is over now:
607  *                      Check for throttled data and send it to the userspace
608  */
609 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
610 {
611 	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
612 
613 	tasklet_schedule(&op->thrtsklet);
614 
615 	if (bcm_rx_thr_flush(op, 0)) {
616 		hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
617 		return HRTIMER_RESTART;
618 	} else {
619 		/* rearm throttle handling */
620 		op->kt_lastmsg = ktime_set(0, 0);
621 		return HRTIMER_NORESTART;
622 	}
623 }
624 
625 /*
626  * bcm_rx_handler - handle a CAN frame reception
627  */
628 static void bcm_rx_handler(struct sk_buff *skb, void *data)
629 {
630 	struct bcm_op *op = (struct bcm_op *)data;
631 	const struct can_frame *rxframe = (struct can_frame *)skb->data;
632 	unsigned int i;
633 
634 	/* disable timeout */
635 	hrtimer_cancel(&op->timer);
636 
637 	if (op->can_id != rxframe->can_id)
638 		return;
639 
640 	/* save rx timestamp */
641 	op->rx_stamp = skb->tstamp;
642 	/* save originator for recvfrom() */
643 	op->rx_ifindex = skb->dev->ifindex;
644 	/* update statistics */
645 	op->frames_abs++;
646 
647 	if (op->flags & RX_RTR_FRAME) {
648 		/* send reply for RTR-request (placed in op->frames[0]) */
649 		bcm_can_tx(op);
650 		return;
651 	}
652 
653 	if (op->flags & RX_FILTER_ID) {
654 		/* the easiest case */
655 		bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
656 		goto rx_starttimer;
657 	}
658 
659 	if (op->nframes == 1) {
660 		/* simple compare with index 0 */
661 		bcm_rx_cmp_to_index(op, 0, rxframe);
662 		goto rx_starttimer;
663 	}
664 
665 	if (op->nframes > 1) {
666 		/*
667 		 * multiplex compare
668 		 *
669 		 * find the first multiplex mask that fits.
670 		 * Remark: The MUX-mask is stored in index 0
671 		 */
672 
673 		for (i = 1; i < op->nframes; i++) {
674 			if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
675 			    (GET_U64(&op->frames[0]) &
676 			     GET_U64(&op->frames[i]))) {
677 				bcm_rx_cmp_to_index(op, i, rxframe);
678 				break;
679 			}
680 		}
681 	}
682 
683 rx_starttimer:
684 	bcm_rx_starttimer(op);
685 }
686 
687 /*
688  * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
689  */
690 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
691 				  int ifindex)
692 {
693 	struct bcm_op *op;
694 
695 	list_for_each_entry(op, ops, list) {
696 		if ((op->can_id == can_id) && (op->ifindex == ifindex))
697 			return op;
698 	}
699 
700 	return NULL;
701 }
702 
703 static void bcm_remove_op(struct bcm_op *op)
704 {
705 	hrtimer_cancel(&op->timer);
706 	hrtimer_cancel(&op->thrtimer);
707 
708 	if (op->tsklet.func)
709 		tasklet_kill(&op->tsklet);
710 
711 	if (op->thrtsklet.func)
712 		tasklet_kill(&op->thrtsklet);
713 
714 	if ((op->frames) && (op->frames != &op->sframe))
715 		kfree(op->frames);
716 
717 	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
718 		kfree(op->last_frames);
719 
720 	kfree(op);
721 }
722 
723 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
724 {
725 	if (op->rx_reg_dev == dev) {
726 		can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
727 				  bcm_rx_handler, op);
728 
729 		/* mark the subscription as removed */
730 		op->rx_reg_dev = NULL;
731 	} else
732 		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
733 		       "mismatch %p %p\n", op->rx_reg_dev, dev);
734 }
735 
736 /*
737  * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
738  */
739 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
740 {
741 	struct bcm_op *op, *n;
742 
743 	list_for_each_entry_safe(op, n, ops, list) {
744 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
745 
746 			/*
747 			 * No matter whether we are bound or not (due to netdev
748 			 * problems), can_rx_unregister() is always a safe
749 			 * thing to do here.
750 			 */
751 			if (op->ifindex) {
752 				/*
753 				 * Only remove subscriptions that had not
754 				 * been removed due to NETDEV_UNREGISTER
755 				 * in bcm_notifier()
756 				 */
757 				if (op->rx_reg_dev) {
758 					struct net_device *dev;
759 
760 					dev = dev_get_by_index(&init_net,
761 							       op->ifindex);
762 					if (dev) {
763 						bcm_rx_unreg(dev, op);
764 						dev_put(dev);
765 					}
766 				}
767 			} else
768 				can_rx_unregister(NULL, op->can_id,
769 						  REGMASK(op->can_id),
770 						  bcm_rx_handler, op);
771 
772 			list_del(&op->list);
773 			bcm_remove_op(op);
774 			return 1; /* done */
775 		}
776 	}
777 
778 	return 0; /* not found */
779 }
780 
781 /*
782  * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
783  */
784 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
785 {
786 	struct bcm_op *op, *n;
787 
788 	list_for_each_entry_safe(op, n, ops, list) {
789 		if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
790 			list_del(&op->list);
791 			bcm_remove_op(op);
792 			return 1; /* done */
793 		}
794 	}
795 
796 	return 0; /* not found */
797 }
798 
799 /*
800  * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
801  */
802 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
803 		       int ifindex)
804 {
805 	struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
806 
807 	if (!op)
808 		return -EINVAL;
809 
810 	/* put current values into msg_head */
811 	msg_head->flags   = op->flags;
812 	msg_head->count   = op->count;
813 	msg_head->ival1   = op->ival1;
814 	msg_head->ival2   = op->ival2;
815 	msg_head->nframes = op->nframes;
816 
817 	bcm_send_to_user(op, msg_head, op->frames, 0);
818 
819 	return MHSIZ;
820 }
821 
822 /*
823  * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
824  */
825 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
826 			int ifindex, struct sock *sk)
827 {
828 	struct bcm_sock *bo = bcm_sk(sk);
829 	struct bcm_op *op;
830 	unsigned int i;
831 	int err;
832 
833 	/* we need a real device to send frames */
834 	if (!ifindex)
835 		return -ENODEV;
836 
837 	/* check nframes boundaries - we need at least one can_frame */
838 	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
839 		return -EINVAL;
840 
841 	/* check the given can_id */
842 	op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
843 
844 	if (op) {
845 		/* update existing BCM operation */
846 
847 		/*
848 		 * Do we need more space for the can_frames than currently
849 		 * allocated? -> This is a _really_ unusual use-case and
850 		 * therefore (complexity / locking) it is not supported.
851 		 * therefore (due to complexity / locking) it is not supported.
852 		if (msg_head->nframes > op->nframes)
853 			return -E2BIG;
854 
855 		/* update can_frames content */
856 		for (i = 0; i < msg_head->nframes; i++) {
857 			err = memcpy_fromiovec((u8 *)&op->frames[i],
858 					       msg->msg_iov, CFSIZ);
859 
860 			if (op->frames[i].can_dlc > 8)
861 				err = -EINVAL;
862 
863 			if (err < 0)
864 				return err;
865 
866 			if (msg_head->flags & TX_CP_CAN_ID) {
867 				/* copy can_id into frame */
868 				op->frames[i].can_id = msg_head->can_id;
869 			}
870 		}
871 
872 	} else {
873 		/* insert new BCM operation for the given can_id */
874 
875 		op = kzalloc(OPSIZ, GFP_KERNEL);
876 		if (!op)
877 			return -ENOMEM;
878 
879 		op->can_id    = msg_head->can_id;
880 
881 		/* create array for can_frames and copy the data */
882 		if (msg_head->nframes > 1) {
883 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
884 					     GFP_KERNEL);
885 			if (!op->frames) {
886 				kfree(op);
887 				return -ENOMEM;
888 			}
889 		} else
890 			op->frames = &op->sframe;
891 
892 		for (i = 0; i < msg_head->nframes; i++) {
893 			err = memcpy_fromiovec((u8 *)&op->frames[i],
894 					       msg->msg_iov, CFSIZ);
895 
896 			if (op->frames[i].can_dlc > 8)
897 				err = -EINVAL;
898 
899 			if (err < 0) {
900 				if (op->frames != &op->sframe)
901 					kfree(op->frames);
902 				kfree(op);
903 				return err;
904 			}
905 
906 			if (msg_head->flags & TX_CP_CAN_ID) {
907 				/* copy can_id into frame */
908 				op->frames[i].can_id = msg_head->can_id;
909 			}
910 		}
911 
912 		/* tx_ops never compare with previously received messages */
913 		op->last_frames = NULL;
914 
915 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
916 		op->sk = sk;
917 		op->ifindex = ifindex;
918 
919 		/* initialize uninitialized (kzalloc) structure */
920 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
921 		op->timer.function = bcm_tx_timeout_handler;
922 
923 		/* initialize tasklet for tx countevent notification */
924 		tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
925 			     (unsigned long) op);
926 
927 		/* currently unused in tx_ops */
928 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
929 
930 		/* add this bcm_op to the list of the tx_ops */
931 		list_add(&op->list, &bo->tx_ops);
932 
933 	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
934 
935 	if (op->nframes != msg_head->nframes) {
936 		op->nframes   = msg_head->nframes;
937 		/* start multiple frame transmission with index 0 */
938 		op->currframe = 0;
939 	}
940 
941 	/* check flags */
942 
943 	op->flags = msg_head->flags;
944 
945 	if (op->flags & TX_RESET_MULTI_IDX) {
946 		/* start multiple frame transmission with index 0 */
947 		op->currframe = 0;
948 	}
949 
950 	if (op->flags & SETTIMER) {
951 		/* set timer values */
952 		op->count = msg_head->count;
953 		op->ival1 = msg_head->ival1;
954 		op->ival2 = msg_head->ival2;
955 		op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
956 		op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
957 
958 		/* disable an active timer due to zero values? */
959 		if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
960 			hrtimer_cancel(&op->timer);
961 	}
962 
963 	if (op->flags & STARTTIMER) {
964 		hrtimer_cancel(&op->timer);
965 		/* spec: send can_frame when starting timer */
966 		op->flags |= TX_ANNOUNCE;
967 	}
968 
969 	if (op->flags & TX_ANNOUNCE) {
970 		bcm_can_tx(op);
971 		if (op->count)
972 			op->count--;
973 	}
974 
975 	if (op->flags & STARTTIMER)
976 		bcm_tx_start_timer(op);
977 
978 	return msg_head->nframes * CFSIZ + MHSIZ;
979 }
980 
981 /*
982  * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
983  */
984 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
985 			int ifindex, struct sock *sk)
986 {
987 	struct bcm_sock *bo = bcm_sk(sk);
988 	struct bcm_op *op;
989 	int do_rx_register;
990 	int err = 0;
991 
992 	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
993 		/* be robust against wrong usage ... */
994 		msg_head->flags |= RX_FILTER_ID;
995 		/* ignore trailing garbage */
996 		msg_head->nframes = 0;
997 	}
998 
999 	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
1000 	if (msg_head->nframes > MAX_NFRAMES + 1)
1001 		return -EINVAL;
1002 
1003 	if ((msg_head->flags & RX_RTR_FRAME) &&
1004 	    ((msg_head->nframes != 1) ||
1005 	     (!(msg_head->can_id & CAN_RTR_FLAG))))
1006 		return -EINVAL;
1007 
1008 	/* check the given can_id */
1009 	op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
1010 	if (op) {
1011 		/* update existing BCM operation */
1012 
1013 		/*
1014 		 * Do we need more space for the can_frames than currently
1015 		 * allocated? -> This is a _really_ unusual use-case and
1016 		 * therefore (due to complexity / locking) it is not supported.
1017 		 */
1018 		if (msg_head->nframes > op->nframes)
1019 			return -E2BIG;
1020 
1021 		if (msg_head->nframes) {
1022 			/* update can_frames content */
1023 			err = memcpy_fromiovec((u8 *)op->frames,
1024 					       msg->msg_iov,
1025 					       msg_head->nframes * CFSIZ);
1026 			if (err < 0)
1027 				return err;
1028 
1029 			/* clear last_frames to indicate 'nothing received' */
1030 			memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
1031 		}
1032 
1033 		op->nframes = msg_head->nframes;
1034 
1035 		/* Only an update -> do not call can_rx_register() */
1036 		do_rx_register = 0;
1037 
1038 	} else {
1039 		/* insert new BCM operation for the given can_id */
1040 		op = kzalloc(OPSIZ, GFP_KERNEL);
1041 		if (!op)
1042 			return -ENOMEM;
1043 
1044 		op->can_id    = msg_head->can_id;
1045 		op->nframes   = msg_head->nframes;
1046 
1047 		if (msg_head->nframes > 1) {
1048 			/* create array for can_frames and copy the data */
1049 			op->frames = kmalloc(msg_head->nframes * CFSIZ,
1050 					     GFP_KERNEL);
1051 			if (!op->frames) {
1052 				kfree(op);
1053 				return -ENOMEM;
1054 			}
1055 
1056 			/* create and init array for received can_frames */
1057 			op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1058 						  GFP_KERNEL);
1059 			if (!op->last_frames) {
1060 				kfree(op->frames);
1061 				kfree(op);
1062 				return -ENOMEM;
1063 			}
1064 
1065 		} else {
1066 			op->frames = &op->sframe;
1067 			op->last_frames = &op->last_sframe;
1068 		}
1069 
1070 		if (msg_head->nframes) {
1071 			err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1072 					       msg_head->nframes * CFSIZ);
1073 			if (err < 0) {
1074 				if (op->frames != &op->sframe)
1075 					kfree(op->frames);
1076 				if (op->last_frames != &op->last_sframe)
1077 					kfree(op->last_frames);
1078 				kfree(op);
1079 				return err;
1080 			}
1081 		}
1082 
1083 		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
1084 		op->sk = sk;
1085 		op->ifindex = ifindex;
1086 
1087 		/* initialize uninitialized (kzalloc) structure */
1088 		hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1089 		op->timer.function = bcm_rx_timeout_handler;
1090 
1091 		/* initialize tasklet for rx timeout notification */
1092 		tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
1093 			     (unsigned long) op);
1094 
1095 		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1096 		op->thrtimer.function = bcm_rx_thr_handler;
1097 
1098 		/* initialize tasklet for rx throttle handling */
1099 		tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
1100 			     (unsigned long) op);
1101 
1102 		/* add this bcm_op to the list of the rx_ops */
1103 		list_add(&op->list, &bo->rx_ops);
1104 
1105 		/* call can_rx_register() */
1106 		do_rx_register = 1;
1107 
1108 	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1109 
1110 	/* check flags */
1111 	op->flags = msg_head->flags;
1112 
1113 	if (op->flags & RX_RTR_FRAME) {
1114 
1115 		/* no timers in RTR-mode */
1116 		hrtimer_cancel(&op->thrtimer);
1117 		hrtimer_cancel(&op->timer);
1118 
1119 		/*
1120 		 * funny feature in RX(!)_SETUP only for RTR-mode:
1121 		 * copy can_id into frame BUT without RTR-flag to
1122 		 * prevent a full-load-loopback-test ... ;-]
1123 		 */
1124 		if ((op->flags & TX_CP_CAN_ID) ||
1125 		    (op->frames[0].can_id == op->can_id))
1126 			op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1127 
1128 	} else {
1129 		if (op->flags & SETTIMER) {
1130 
1131 			/* set timer value */
1132 			op->ival1 = msg_head->ival1;
1133 			op->ival2 = msg_head->ival2;
1134 			op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1135 			op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1136 
1137 			/* disable an active timer due to zero value? */
1138 			if (!op->kt_ival1.tv64)
1139 				hrtimer_cancel(&op->timer);
1140 
1141 			/*
1142 			 * In any case cancel the throttle timer, flush
1143 			 * potentially blocked msgs and reset throttle handling
1144 			 */
1145 			op->kt_lastmsg = ktime_set(0, 0);
1146 			hrtimer_cancel(&op->thrtimer);
1147 			bcm_rx_thr_flush(op, 1);
1148 		}
1149 
1150 		if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1151 			hrtimer_start(&op->timer, op->kt_ival1,
1152 				      HRTIMER_MODE_REL);
1153 	}
1154 
1155 	/* now we can register for can_ids, if we added a new bcm_op */
1156 	if (do_rx_register) {
1157 		if (ifindex) {
1158 			struct net_device *dev;
1159 
1160 			dev = dev_get_by_index(&init_net, ifindex);
1161 			if (dev) {
1162 				err = can_rx_register(dev, op->can_id,
1163 						      REGMASK(op->can_id),
1164 						      bcm_rx_handler, op,
1165 						      "bcm");
1166 
1167 				op->rx_reg_dev = dev;
1168 				dev_put(dev);
1169 			}
1170 
1171 		} else
1172 			err = can_rx_register(NULL, op->can_id,
1173 					      REGMASK(op->can_id),
1174 					      bcm_rx_handler, op, "bcm");
1175 		if (err) {
1176 			/* this bcm rx op is broken -> remove it */
1177 			list_del(&op->list);
1178 			bcm_remove_op(op);
1179 			return err;
1180 		}
1181 	}
1182 
1183 	return msg_head->nframes * CFSIZ + MHSIZ;
1184 }
1185 
1186 /*
1187  * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1188  */
1189 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1190 {
1191 	struct sk_buff *skb;
1192 	struct net_device *dev;
1193 	int err;
1194 
1195 	/* we need a real device to send frames */
1196 	if (!ifindex)
1197 		return -ENODEV;
1198 
1199 	skb = alloc_skb(CFSIZ, GFP_KERNEL);
1200 
1201 	if (!skb)
1202 		return -ENOMEM;
1203 
1204 	err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1205 	if (err < 0) {
1206 		kfree_skb(skb);
1207 		return err;
1208 	}
1209 
1210 	dev = dev_get_by_index(&init_net, ifindex);
1211 	if (!dev) {
1212 		kfree_skb(skb);
1213 		return -ENODEV;
1214 	}
1215 
1216 	skb->dev = dev;
1217 	skb->sk  = sk;
1218 	err = can_send(skb, 1); /* send with loopback */
1219 	dev_put(dev);
1220 
1221 	if (err)
1222 		return err;
1223 
1224 	return CFSIZ + MHSIZ;
1225 }
1226 
1227 /*
1228  * bcm_sendmsg - process BCM commands (opcodes) from the userspace
1229  */
1230 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1231 		       struct msghdr *msg, size_t size)
1232 {
1233 	struct sock *sk = sock->sk;
1234 	struct bcm_sock *bo = bcm_sk(sk);
1235 	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1236 	struct bcm_msg_head msg_head;
1237 	int ret; /* read bytes or error codes as return value */
1238 
1239 	if (!bo->bound)
1240 		return -ENOTCONN;
1241 
1242 	/* check for valid message length from userspace */
1243 	if (size < MHSIZ || (size - MHSIZ) % CFSIZ)
1244 		return -EINVAL;
1245 
1246 	/* check for alternative ifindex for this bcm_op */
1247 
1248 	if (!ifindex && msg->msg_name) {
1249 		/* no bound device as default => check msg_name */
1250 		struct sockaddr_can *addr =
1251 			(struct sockaddr_can *)msg->msg_name;
1252 
1253 		if (msg->msg_namelen < sizeof(*addr))
1254 			return -EINVAL;
1255 
1256 		if (addr->can_family != AF_CAN)
1257 			return -EINVAL;
1258 
1259 		/* ifindex from sendto() */
1260 		ifindex = addr->can_ifindex;
1261 
1262 		if (ifindex) {
1263 			struct net_device *dev;
1264 
1265 			dev = dev_get_by_index(&init_net, ifindex);
1266 			if (!dev)
1267 				return -ENODEV;
1268 
1269 			if (dev->type != ARPHRD_CAN) {
1270 				dev_put(dev);
1271 				return -ENODEV;
1272 			}
1273 
1274 			dev_put(dev);
1275 		}
1276 	}
1277 
1278 	/* read message head information */
1279 
1280 	ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1281 	if (ret < 0)
1282 		return ret;
1283 
1284 	lock_sock(sk);
1285 
1286 	switch (msg_head.opcode) {
1287 
1288 	case TX_SETUP:
1289 		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1290 		break;
1291 
1292 	case RX_SETUP:
1293 		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1294 		break;
1295 
1296 	case TX_DELETE:
1297 		if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1298 			ret = MHSIZ;
1299 		else
1300 			ret = -EINVAL;
1301 		break;
1302 
1303 	case RX_DELETE:
1304 		if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1305 			ret = MHSIZ;
1306 		else
1307 			ret = -EINVAL;
1308 		break;
1309 
1310 	case TX_READ:
1311 		/* reuse msg_head for the reply to TX_READ */
1312 		msg_head.opcode  = TX_STATUS;
1313 		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1314 		break;
1315 
1316 	case RX_READ:
1317 		/* reuse msg_head for the reply to RX_READ */
1318 		msg_head.opcode  = RX_STATUS;
1319 		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1320 		break;
1321 
1322 	case TX_SEND:
1323 		/* we need exactly one can_frame behind the msg head */
1324 		if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ))
1325 			ret = -EINVAL;
1326 		else
1327 			ret = bcm_tx_send(msg, ifindex, sk);
1328 		break;
1329 
1330 	default:
1331 		ret = -EINVAL;
1332 		break;
1333 	}
1334 
1335 	release_sock(sk);
1336 
1337 	return ret;
1338 }
1339 
1340 /*
1341  * notification handler for netdevice status changes
1342  */
1343 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1344 			void *data)
1345 {
1346 	struct net_device *dev = (struct net_device *)data;
1347 	struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1348 	struct sock *sk = &bo->sk;
1349 	struct bcm_op *op;
1350 	int notify_enodev = 0;
1351 
1352 	if (!net_eq(dev_net(dev), &init_net))
1353 		return NOTIFY_DONE;
1354 
1355 	if (dev->type != ARPHRD_CAN)
1356 		return NOTIFY_DONE;
1357 
1358 	switch (msg) {
1359 
1360 	case NETDEV_UNREGISTER:
1361 		lock_sock(sk);
1362 
1363 		/* remove device specific receive entries */
1364 		list_for_each_entry(op, &bo->rx_ops, list)
1365 			if (op->rx_reg_dev == dev)
1366 				bcm_rx_unreg(dev, op);
1367 
1368 		/* remove device reference, if this is our bound device */
1369 		if (bo->bound && bo->ifindex == dev->ifindex) {
1370 			bo->bound   = 0;
1371 			bo->ifindex = 0;
1372 			notify_enodev = 1;
1373 		}
1374 
1375 		release_sock(sk);
1376 
1377 		if (notify_enodev) {
1378 			sk->sk_err = ENODEV;
1379 			if (!sock_flag(sk, SOCK_DEAD))
1380 				sk->sk_error_report(sk);
1381 		}
1382 		break;
1383 
1384 	case NETDEV_DOWN:
1385 		if (bo->bound && bo->ifindex == dev->ifindex) {
1386 			sk->sk_err = ENETDOWN;
1387 			if (!sock_flag(sk, SOCK_DEAD))
1388 				sk->sk_error_report(sk);
1389 		}
1390 	}
1391 
1392 	return NOTIFY_DONE;
1393 }
1394 
1395 /*
1396  * initial settings for all BCM sockets to be set at socket creation time
1397  */
1398 static int bcm_init(struct sock *sk)
1399 {
1400 	struct bcm_sock *bo = bcm_sk(sk);
1401 
1402 	bo->bound            = 0;
1403 	bo->ifindex          = 0;
1404 	bo->dropped_usr_msgs = 0;
1405 	bo->bcm_proc_read    = NULL;
1406 
1407 	INIT_LIST_HEAD(&bo->tx_ops);
1408 	INIT_LIST_HEAD(&bo->rx_ops);
1409 
1410 	/* set notifier */
1411 	bo->notifier.notifier_call = bcm_notifier;
1412 
1413 	register_netdevice_notifier(&bo->notifier);
1414 
1415 	return 0;
1416 }
1417 
1418 /*
1419  * standard socket functions
1420  */
1421 static int bcm_release(struct socket *sock)
1422 {
1423 	struct sock *sk = sock->sk;
1424 	struct bcm_sock *bo;
1425 	struct bcm_op *op, *next;
1426 
1427 	if (sk == NULL)
1428 		return 0;
1429 
1430 	bo = bcm_sk(sk);
1431 
1432 	/* remove bcm_ops, timer, rx_unregister(), etc. */
1433 
1434 	unregister_netdevice_notifier(&bo->notifier);
1435 
1436 	lock_sock(sk);
1437 
1438 	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1439 		bcm_remove_op(op);
1440 
1441 	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1442 		/*
1443 		 * No matter whether we are bound or not (due to netdev problems),
1444 		 * can_rx_unregister() is always a safe thing to do here.
1445 		 */
1446 		if (op->ifindex) {
1447 			/*
1448 			 * Only remove subscriptions that had not
1449 			 * been removed due to NETDEV_UNREGISTER
1450 			 * in bcm_notifier()
1451 			 */
1452 			if (op->rx_reg_dev) {
1453 				struct net_device *dev;
1454 
1455 				dev = dev_get_by_index(&init_net, op->ifindex);
1456 				if (dev) {
1457 					bcm_rx_unreg(dev, op);
1458 					dev_put(dev);
1459 				}
1460 			}
1461 		} else
1462 			can_rx_unregister(NULL, op->can_id,
1463 					  REGMASK(op->can_id),
1464 					  bcm_rx_handler, op);
1465 
1466 		bcm_remove_op(op);
1467 	}
1468 
1469 	/* remove procfs entry */
1470 	if (proc_dir && bo->bcm_proc_read)
1471 		remove_proc_entry(bo->procname, proc_dir);
1472 
1473 	/* remove device reference */
1474 	if (bo->bound) {
1475 		bo->bound   = 0;
1476 		bo->ifindex = 0;
1477 	}
1478 
1479 	sock_orphan(sk);
1480 	sock->sk = NULL;
1481 
1482 	release_sock(sk);
1483 	sock_put(sk);
1484 
1485 	return 0;
1486 }
1487 
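/*
 * BCM sockets are attached to a CAN interface via connect() - see
 * .bind = sock_no_bind in bcm_ops below. A can_ifindex of 0 selects
 * 'any' CAN device.
 */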
1488 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1489 		       int flags)
1490 {
1491 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1492 	struct sock *sk = sock->sk;
1493 	struct bcm_sock *bo = bcm_sk(sk);
1494 
1495 	if (len < sizeof(*addr))
1496 		return -EINVAL;
1497 
1498 	if (bo->bound)
1499 		return -EISCONN;
1500 
1501 	/* bind a device to this socket */
1502 	if (addr->can_ifindex) {
1503 		struct net_device *dev;
1504 
1505 		dev = dev_get_by_index(&init_net, addr->can_ifindex);
1506 		if (!dev)
1507 			return -ENODEV;
1508 
1509 		if (dev->type != ARPHRD_CAN) {
1510 			dev_put(dev);
1511 			return -ENODEV;
1512 		}
1513 
1514 		bo->ifindex = dev->ifindex;
1515 		dev_put(dev);
1516 
1517 	} else {
1518 		/* no interface reference for ifindex = 0 ('any' CAN device) */
1519 		bo->ifindex = 0;
1520 	}
1521 
1522 	bo->bound = 1;
1523 
1524 	if (proc_dir) {
1525 		/* unique socket address as filename */
1526 		sprintf(bo->procname, "%lu", sock_i_ino(sk));
1527 		bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
1528 						     proc_dir,
1529 						     &bcm_proc_fops, sk);
1530 	}
1531 
1532 	return 0;
1533 }
1534 
1535 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1536 		       struct msghdr *msg, size_t size, int flags)
1537 {
1538 	struct sock *sk = sock->sk;
1539 	struct sk_buff *skb;
1540 	int error = 0;
1541 	int noblock;
1542 	int err;
1543 
1544 	noblock =  flags & MSG_DONTWAIT;
1545 	flags   &= ~MSG_DONTWAIT;
1546 	skb = skb_recv_datagram(sk, flags, noblock, &error);
1547 	if (!skb)
1548 		return error;
1549 
1550 	if (skb->len < size)
1551 		size = skb->len;
1552 
1553 	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1554 	if (err < 0) {
1555 		skb_free_datagram(sk, skb);
1556 		return err;
1557 	}
1558 
1559 	sock_recv_ts_and_drops(msg, sk, skb);
1560 
1561 	if (msg->msg_name) {
1562 		msg->msg_namelen = sizeof(struct sockaddr_can);
1563 		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1564 	}
1565 
1566 	skb_free_datagram(sk, skb);
1567 
1568 	return size;
1569 }
1570 
1571 static const struct proto_ops bcm_ops = {
1572 	.family        = PF_CAN,
1573 	.release       = bcm_release,
1574 	.bind          = sock_no_bind,
1575 	.connect       = bcm_connect,
1576 	.socketpair    = sock_no_socketpair,
1577 	.accept        = sock_no_accept,
1578 	.getname       = sock_no_getname,
1579 	.poll          = datagram_poll,
1580 	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
1581 	.listen        = sock_no_listen,
1582 	.shutdown      = sock_no_shutdown,
1583 	.setsockopt    = sock_no_setsockopt,
1584 	.getsockopt    = sock_no_getsockopt,
1585 	.sendmsg       = bcm_sendmsg,
1586 	.recvmsg       = bcm_recvmsg,
1587 	.mmap          = sock_no_mmap,
1588 	.sendpage      = sock_no_sendpage,
1589 };
1590 
1591 static struct proto bcm_proto __read_mostly = {
1592 	.name       = "CAN_BCM",
1593 	.owner      = THIS_MODULE,
1594 	.obj_size   = sizeof(struct bcm_sock),
1595 	.init       = bcm_init,
1596 };
1597 
1598 static const struct can_proto bcm_can_proto = {
1599 	.type       = SOCK_DGRAM,
1600 	.protocol   = CAN_BCM,
1601 	.ops        = &bcm_ops,
1602 	.prot       = &bcm_proto,
1603 };
1604 
1605 static int __init bcm_module_init(void)
1606 {
1607 	int err;
1608 
1609 	printk(banner);
1610 
1611 	err = can_proto_register(&bcm_can_proto);
1612 	if (err < 0) {
1613 		printk(KERN_ERR "can: registration of bcm protocol failed\n");
1614 		return err;
1615 	}
1616 
1617 	/* create /proc/net/can-bcm directory */
1618 	proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1619 	return 0;
1620 }
1621 
1622 static void __exit bcm_module_exit(void)
1623 {
1624 	can_proto_unregister(&bcm_can_proto);
1625 
1626 	if (proc_dir)
1627 		proc_net_remove(&init_net, "can-bcm");
1628 }
1629 
1630 module_init(bcm_module_init);
1631 module_exit(bcm_module_exit);
1632