xref: /linux/drivers/s390/net/ctcm_fsms.c (revision e9fb13bfec7e017130ddc5c1b5466340470f4900)
1 /*
2  * drivers/s390/net/ctcm_fsms.c
3  *
4  * Copyright IBM Corp. 2001, 2007
5  * Authors:	Fritz Elfert (felfert@millenux.com)
6  * 		Peter Tiedemann (ptiedem@de.ibm.com)
7  *	MPC additions :
8  *		Belinda Thompson (belindat@us.ibm.com)
9  *		Andy Richter (richtera@us.ibm.com)
10  */
11 
12 #undef DEBUG
13 #undef DEBUGDATA
14 #undef DEBUGCCW
15 
16 #define KMSG_COMPONENT "ctcm"
17 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/slab.h>
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/interrupt.h>
26 #include <linux/timer.h>
27 #include <linux/bitops.h>
28 
29 #include <linux/signal.h>
30 #include <linux/string.h>
31 
32 #include <linux/ip.h>
33 #include <linux/if_arp.h>
34 #include <linux/tcp.h>
35 #include <linux/skbuff.h>
36 #include <linux/ctype.h>
37 #include <net/dst.h>
38 
39 #include <linux/io.h>
40 #include <asm/ccwdev.h>
41 #include <asm/ccwgroup.h>
42 #include <linux/uaccess.h>
43 
44 #include <asm/idals.h>
45 
46 #include "fsm.h"
47 
48 #include "ctcm_dbug.h"
49 #include "ctcm_main.h"
50 #include "ctcm_fsms.h"
51 
52 const char *dev_state_names[] = {
53 	[DEV_STATE_STOPPED]		= "Stopped",
54 	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
55 	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
56 	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
57 	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
58 	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
59 	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
60 	[DEV_STATE_RUNNING]		= "Running",
61 };
62 
63 const char *dev_event_names[] = {
64 	[DEV_EVENT_START]	= "Start",
65 	[DEV_EVENT_STOP]	= "Stop",
66 	[DEV_EVENT_RXUP]	= "RX up",
67 	[DEV_EVENT_TXUP]	= "TX up",
68 	[DEV_EVENT_RXDOWN]	= "RX down",
69 	[DEV_EVENT_TXDOWN]	= "TX down",
70 	[DEV_EVENT_RESTART]	= "Restart",
71 };
72 
73 const char *ctc_ch_event_names[] = {
74 	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
75 	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
76 	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
77 	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
78 	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
79 	[CTC_EVENT_ATTN]	= "Status ATTN",
80 	[CTC_EVENT_BUSY]	= "Status BUSY",
81 	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
82 	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
83 	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
84 	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
85 	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
86 	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
87 	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
88 	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
89 	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
90 	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
91 	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
92 	[CTC_EVENT_IRQ]		= "IRQ normal",
93 	[CTC_EVENT_FINSTAT]	= "IRQ final",
94 	[CTC_EVENT_TIMER]	= "Timer",
95 	[CTC_EVENT_START]	= "Start",
96 	[CTC_EVENT_STOP]	= "Stop",
97 	/*
98 	* additional MPC events
99 	*/
100 	[CTC_EVENT_SEND_XID]	= "XID Exchange",
101 	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
102 };
103 
104 const char *ctc_ch_state_names[] = {
105 	[CTC_STATE_IDLE]	= "Idle",
106 	[CTC_STATE_STOPPED]	= "Stopped",
107 	[CTC_STATE_STARTWAIT]	= "StartWait",
108 	[CTC_STATE_STARTRETRY]	= "StartRetry",
109 	[CTC_STATE_SETUPWAIT]	= "SetupWait",
110 	[CTC_STATE_RXINIT]	= "RX init",
111 	[CTC_STATE_TXINIT]	= "TX init",
112 	[CTC_STATE_RX]		= "RX",
113 	[CTC_STATE_TX]		= "TX",
114 	[CTC_STATE_RXIDLE]	= "RX idle",
115 	[CTC_STATE_TXIDLE]	= "TX idle",
116 	[CTC_STATE_RXERR]	= "RX error",
117 	[CTC_STATE_TXERR]	= "TX error",
118 	[CTC_STATE_TERM]	= "Terminating",
119 	[CTC_STATE_DTERM]	= "Restarting",
120 	[CTC_STATE_NOTOP]	= "Not operational",
121 	/*
122 	* additional MPC states
123 	*/
124 	[CH_XID0_PENDING]	= "Pending XID0 Start",
125 	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
126 	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
127 	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
128 	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
129 	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
130 	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
131 };
132 
133 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
134 
135 /*
136  * ----- static ctcm actions for channel statemachine -----
137  *
138 */
139 static void chx_txdone(fsm_instance *fi, int event, void *arg);
140 static void chx_rx(fsm_instance *fi, int event, void *arg);
141 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
142 static void chx_firstio(fsm_instance *fi, int event, void *arg);
143 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
155 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
156 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
157 
158 /*
159  * ----- static ctcmpc actions for ctcmpc channel statemachine -----
160  *
161 */
162 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
163 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
164 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
165 /* shared :
166 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
167 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
178 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
179 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
180 */
181 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
182 static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
183 static void ctcmpc_chx_resend(fsm_instance *, int, void *);
184 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
185 
186 /**
187  * Check return code of a preceding ccw_device call, halt_IO etc...
188  *
 189  * ch	:	The channel the error belongs to.
 190  * rc	:	The error code (!= 0) to inspect; msg describes the failed operation.
191  */
192 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
193 {
194 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
195 		"%s(%s): %s: %04x\n",
196 		CTCM_FUNTAIL, ch->id, msg, rc);
197 	switch (rc) {
198 	case -EBUSY:
199 		pr_info("%s: The communication peer is busy\n",
200 			ch->id);
201 		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
202 		break;
203 	case -ENODEV:
204 		pr_err("%s: The specified target device is not valid\n",
205 		       ch->id);
206 		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
207 		break;
208 	default:
209 		pr_err("An I/O operation resulted in error %04x\n",
210 		       rc);
211 		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
212 	}
213 }
214 
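/*
 * Empty the given skb queue, dropping the extra reference held on each
 * queued skb before freeing it.
 */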
215 void ctcm_purge_skb_queue(struct sk_buff_head *q)
216 {
217 	struct sk_buff *skb;
218 
219 	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
220 
221 	while ((skb = skb_dequeue(q))) {
222 		atomic_dec(&skb->users);
223 		dev_kfree_skb_any(skb);
224 	}
225 }
226 
227 /**
228  * NOP action for statemachines
229  */
230 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
231 {
232 }
233 
234 /*
235  * Actions for channel - statemachines.
236  */
237 
238 /**
 239  * Normal data has been sent. Free the corresponding
240  * skb (it's in io_queue), reset dev->tbusy and
241  * revert to idle state.
242  *
243  * fi		An instance of a channel statemachine.
244  * event	The event, just happened.
 245  * arg		Generic pointer, cast from channel * upon call.
246  */
247 static void chx_txdone(fsm_instance *fi, int event, void *arg)
248 {
249 	struct channel *ch = arg;
250 	struct net_device *dev = ch->netdev;
251 	struct ctcm_priv *priv = dev->ml_priv;
252 	struct sk_buff *skb;
253 	int first = 1;
254 	int i;
255 	unsigned long duration;
256 	struct timespec done_stamp = current_kernel_time(); /* xtime */
257 
258 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
259 
260 	duration =
261 	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
262 	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
263 	if (duration > ch->prof.tx_time)
264 		ch->prof.tx_time = duration;
265 
266 	if (ch->irb->scsw.cmd.count != 0)
267 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
268 			"%s(%s): TX not complete, remaining %d bytes",
269 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
270 	fsm_deltimer(&ch->timer);
271 	while ((skb = skb_dequeue(&ch->io_queue))) {
272 		priv->stats.tx_packets++;
273 		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
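		/* The first skb of a block also accounts for the 2-byte
		 * block-length header that precedes the data. */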
274 		if (first) {
275 			priv->stats.tx_bytes += 2;
276 			first = 0;
277 		}
278 		atomic_dec(&skb->users);
279 		dev_kfree_skb_irq(skb);
280 	}
281 	spin_lock(&ch->collect_lock);
282 	clear_normalized_cda(&ch->ccw[4]);
283 	if (ch->collect_len > 0) {
284 		int rc;
285 
286 		if (ctcm_checkalloc_buffer(ch)) {
287 			spin_unlock(&ch->collect_lock);
288 			return;
289 		}
290 		ch->trans_skb->data = ch->trans_skb_data;
291 		skb_reset_tail_pointer(ch->trans_skb);
292 		ch->trans_skb->len = 0;
293 		if (ch->prof.maxmulti < (ch->collect_len + 2))
294 			ch->prof.maxmulti = ch->collect_len + 2;
295 		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
296 			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
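		/* The block starts with a 2-byte total length that includes
		 * the length field itself. */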
297 		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
298 		i = 0;
299 		while ((skb = skb_dequeue(&ch->collect_queue))) {
300 			skb_copy_from_linear_data(skb,
301 				skb_put(ch->trans_skb, skb->len), skb->len);
302 			priv->stats.tx_packets++;
303 			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
304 			atomic_dec(&skb->users);
305 			dev_kfree_skb_irq(skb);
306 			i++;
307 		}
308 		ch->collect_len = 0;
309 		spin_unlock(&ch->collect_lock);
310 		ch->ccw[1].count = ch->trans_skb->len;
311 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
312 		ch->prof.send_stamp = current_kernel_time(); /* xtime */
313 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
314 						(unsigned long)ch, 0xff, 0);
315 		ch->prof.doios_multi++;
316 		if (rc != 0) {
317 			priv->stats.tx_dropped += i;
318 			priv->stats.tx_errors += i;
319 			fsm_deltimer(&ch->timer);
320 			ctcm_ccw_check_rc(ch, rc, "chained TX");
321 		}
322 	} else {
323 		spin_unlock(&ch->collect_lock);
324 		fsm_newstate(fi, CTC_STATE_TXIDLE);
325 	}
326 	ctcm_clear_busy_do(dev);
327 }
328 
329 /**
330  * Initial data is sent.
331  * Notify device statemachine that we are up and
332  * running.
333  *
334  * fi		An instance of a channel statemachine.
335  * event	The event, just happened.
 336  * arg		Generic pointer, cast from channel * upon call.
337  */
338 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
339 {
340 	struct channel *ch = arg;
341 	struct net_device *dev = ch->netdev;
342 	struct ctcm_priv *priv = dev->ml_priv;
343 
344 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
345 
346 	fsm_deltimer(&ch->timer);
347 	fsm_newstate(fi, CTC_STATE_TXIDLE);
348 	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
349 }
350 
351 /**
 352  * Got normal data, check for sanity, queue it up, allocate a new buffer,
 353  * trigger bottom half, and initiate next read.
354  *
355  * fi		An instance of a channel statemachine.
356  * event	The event, just happened.
 357  * arg		Generic pointer, cast from channel * upon call.
358  */
359 static void chx_rx(fsm_instance *fi, int event, void *arg)
360 {
361 	struct channel *ch = arg;
362 	struct net_device *dev = ch->netdev;
363 	struct ctcm_priv *priv = dev->ml_priv;
364 	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
365 	struct sk_buff *skb = ch->trans_skb;
366 	__u16 block_len = *((__u16 *)skb->data);
367 	int check_len;
368 	int rc;
369 
370 	fsm_deltimer(&ch->timer);
371 	if (len < 8) {
372 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
373 			"%s(%s): got packet with length %d < 8\n",
374 					CTCM_FUNTAIL, dev->name, len);
375 		priv->stats.rx_dropped++;
376 		priv->stats.rx_length_errors++;
377 						goto again;
378 	}
379 	if (len > ch->max_bufsize) {
380 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
381 			"%s(%s): got packet with length %d > %d\n",
382 				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
383 		priv->stats.rx_dropped++;
384 		priv->stats.rx_length_errors++;
385 						goto again;
386 	}
387 
388 	/*
389 	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
390 	 */
391 	switch (ch->protocol) {
392 	case CTCM_PROTO_S390:
393 	case CTCM_PROTO_OS390:
394 		check_len = block_len + 2;
395 		break;
396 	default:
397 		check_len = block_len;
398 		break;
399 	}
400 	if ((len < block_len) || (len > check_len)) {
401 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
402 			"%s(%s): got block length %d != rx length %d\n",
403 				CTCM_FUNTAIL, dev->name, block_len, len);
404 		if (do_debug)
405 			ctcmpc_dump_skb(skb, 0);
406 
407 		*((__u16 *)skb->data) = len;
408 		priv->stats.rx_dropped++;
409 		priv->stats.rx_length_errors++;
410 						goto again;
411 	}
412 	if (block_len > 2) {
413 		*((__u16 *)skb->data) = block_len - 2;
414 		ctcm_unpack_skb(ch, skb);
415 	}
416  again:
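	/* Reset the receive buffer and restart the READ channel program. */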
417 	skb->data = ch->trans_skb_data;
418 	skb_reset_tail_pointer(skb);
419 	skb->len = 0;
420 	if (ctcm_checkalloc_buffer(ch))
421 		return;
422 	ch->ccw[1].count = ch->max_bufsize;
423 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
424 					(unsigned long)ch, 0xff, 0);
425 	if (rc != 0)
426 		ctcm_ccw_check_rc(ch, rc, "normal RX");
427 }
428 
429 /**
 430  * Initialize the connection by sending the initial block-length word (__u16).
431  *
432  * fi		An instance of a channel statemachine.
433  * event	The event, just happened.
 434  * arg		Generic pointer, cast from channel * upon call.
435  */
436 static void chx_firstio(fsm_instance *fi, int event, void *arg)
437 {
438 	int rc;
439 	struct channel *ch = arg;
440 	int fsmstate = fsm_getstate(fi);
441 
442 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
443 		"%s(%s) : %02x",
444 		CTCM_FUNTAIL, ch->id, fsmstate);
445 
446 	ch->sense_rc = 0;	/* reset unit check report control */
447 	if (fsmstate == CTC_STATE_TXIDLE)
448 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
449 			"%s(%s): remote side issued READ?, init.\n",
450 				CTCM_FUNTAIL, ch->id);
451 	fsm_deltimer(&ch->timer);
452 	if (ctcm_checkalloc_buffer(ch))
453 		return;
454 	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
455 	    (ch->protocol == CTCM_PROTO_OS390)) {
456 		/* OS/390 resp. z/OS */
457 		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
458 			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
459 			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
460 				     CTC_EVENT_TIMER, ch);
461 			chx_rxidle(fi, event, arg);
462 		} else {
463 			struct net_device *dev = ch->netdev;
464 			struct ctcm_priv *priv = dev->ml_priv;
465 			fsm_newstate(fi, CTC_STATE_TXIDLE);
466 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
467 		}
468 		return;
469 	}
470 	/*
471 	 * Don't setup a timer for receiving the initial RX frame
472 	 * if in compatibility mode, since VM TCP delays the initial
473 	 * frame until it has some data to send.
474 	 */
475 	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
476 	    (ch->protocol != CTCM_PROTO_S390))
477 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
478 
479 	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
480 	ch->ccw[1].count = 2;	/* Transfer only length */
481 
482 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
483 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
484 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
485 					(unsigned long)ch, 0xff, 0);
486 	if (rc != 0) {
487 		fsm_deltimer(&ch->timer);
488 		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
489 		ctcm_ccw_check_rc(ch, rc, "init IO");
490 	}
491 	/*
492 	 * If in compatibility mode since we don't setup a timer, we
493 	 * also signal RX channel up immediately. This enables us
494 	 * to send packets early which in turn usually triggers some
 495  * reply from VM TCP which brings up the RX channel to its
496 	 * final state.
497 	 */
498 	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
499 	    (ch->protocol == CTCM_PROTO_S390)) {
500 		struct net_device *dev = ch->netdev;
501 		struct ctcm_priv *priv = dev->ml_priv;
502 		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
503 	}
504 }
505 
506 /**
507  * Got initial data, check it. If OK,
508  * notify device statemachine that we are up and
509  * running.
510  *
511  * fi		An instance of a channel statemachine.
512  * event	The event, just happened.
 513  * arg		Generic pointer, cast from channel * upon call.
514  */
515 static void chx_rxidle(fsm_instance *fi, int event, void *arg)
516 {
517 	struct channel *ch = arg;
518 	struct net_device *dev = ch->netdev;
519 	struct ctcm_priv *priv = dev->ml_priv;
520 	__u16 buflen;
521 	int rc;
522 
523 	fsm_deltimer(&ch->timer);
524 	buflen = *((__u16 *)ch->trans_skb->data);
525 	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
526 			__func__, dev->name, buflen);
527 
528 	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
529 		if (ctcm_checkalloc_buffer(ch))
530 			return;
531 		ch->ccw[1].count = ch->max_bufsize;
532 		fsm_newstate(fi, CTC_STATE_RXIDLE);
533 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
534 						(unsigned long)ch, 0xff, 0);
535 		if (rc != 0) {
536 			fsm_newstate(fi, CTC_STATE_RXINIT);
537 			ctcm_ccw_check_rc(ch, rc, "initial RX");
538 		} else
539 			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
540 	} else {
541 		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
542 				__func__, dev->name,
543 					buflen, CTCM_INITIAL_BLOCKLEN);
544 		chx_firstio(fi, event, arg);
545 	}
546 }
547 
548 /**
549  * Set channel into extended mode.
550  *
551  * fi		An instance of a channel statemachine.
552  * event	The event, just happened.
 553  * arg		Generic pointer, cast from channel * upon call.
554  */
555 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
556 {
557 	struct channel *ch = arg;
558 	int rc;
559 	unsigned long saveflags = 0;
560 	int timeout = CTCM_TIME_5_SEC;
561 
562 	fsm_deltimer(&ch->timer);
563 	if (IS_MPC(ch)) {
564 		timeout = 1500;
565 		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
566 				__func__, smp_processor_id(), ch, ch->id);
567 	}
568 	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
569 	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
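	/* ccw[6..7] hold the pre-built channel program that switches the
	 * adapter into extended mode. */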
570 	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
571 
572 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
573 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
 574 			/* Such conditional locking is nondeterministic in
 575 			 * static view => ignore sparse warnings here. */
576 
577 	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
578 					(unsigned long)ch, 0xff, 0);
579 	if (event == CTC_EVENT_TIMER)	/* see above comments */
580 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
581 	if (rc != 0) {
582 		fsm_deltimer(&ch->timer);
583 		fsm_newstate(fi, CTC_STATE_STARTWAIT);
584 		ctcm_ccw_check_rc(ch, rc, "set Mode");
585 	} else
586 		ch->retry = 0;
587 }
588 
589 /**
 590  * Set up the channel.
591  *
592  * fi		An instance of a channel statemachine.
593  * event	The event, just happened.
 594  * arg		Generic pointer, cast from channel * upon call.
595  */
596 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
597 {
598 	struct channel *ch	= arg;
599 	unsigned long saveflags;
600 	int rc;
601 
602 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
603 		CTCM_FUNTAIL, ch->id,
604 		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
605 
606 	if (ch->trans_skb != NULL) {
607 		clear_normalized_cda(&ch->ccw[1]);
608 		dev_kfree_skb(ch->trans_skb);
609 		ch->trans_skb = NULL;
610 	}
611 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
612 		ch->ccw[1].cmd_code = CCW_CMD_READ;
613 		ch->ccw[1].flags = CCW_FLAG_SLI;
614 		ch->ccw[1].count = 0;
615 	} else {
616 		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
617 		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
618 		ch->ccw[1].count = 0;
619 	}
620 	if (ctcm_checkalloc_buffer(ch)) {
621 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
622 			"%s(%s): %s trans_skb alloc delayed "
623 			"until first transfer",
624 			CTCM_FUNTAIL, ch->id,
625 			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
626 				"RX" : "TX");
627 	}
628 	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
629 	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
630 	ch->ccw[0].count = 0;
631 	ch->ccw[0].cda = 0;
632 	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
633 	ch->ccw[2].flags = CCW_FLAG_SLI;
634 	ch->ccw[2].count = 0;
635 	ch->ccw[2].cda = 0;
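	/* Mirror the prepare/data/noop chain into ccw[3..5]; this copy is
	 * used for TX retries, where ccw[4] is pointed at the skb data
	 * (see ctcm_chx_txretry). */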
636 	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
637 	ch->ccw[4].cda = 0;
638 	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
639 
640 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
641 	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
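	/* Terminate any I/O still pending on the subchannel; the resulting
	 * final interrupt (FINSTAT) drives the FSM on to setmode. */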
642 	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
643 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
644 	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
645 	if (rc != 0) {
646 		if (rc != -EBUSY)
647 			fsm_deltimer(&ch->timer);
648 		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
649 	}
650 }
651 
652 /**
 653  * Shut down a channel.
654  *
655  * fi		An instance of a channel statemachine.
656  * event	The event, just happened.
 657  * arg		Generic pointer, cast from channel * upon call.
658  */
659 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
660 {
661 	struct channel *ch = arg;
662 	unsigned long saveflags = 0;
663 	int rc;
664 	int oldstate;
665 
666 	fsm_deltimer(&ch->timer);
667 	if (IS_MPC(ch))
668 		fsm_deltimer(&ch->sweep_timer);
669 
670 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
671 
672 	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
673 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
 674 			/* Such conditional locking is nondeterministic in
 675 			 * static view => ignore sparse warnings here. */
676 	oldstate = fsm_getstate(fi);
677 	fsm_newstate(fi, CTC_STATE_TERM);
678 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
679 
680 	if (event == CTC_EVENT_STOP)
681 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
682 			/* see remark above about conditional locking */
683 
684 	if (rc != 0 && rc != -EBUSY) {
685 		fsm_deltimer(&ch->timer);
686 		if (event != CTC_EVENT_STOP) {
687 			fsm_newstate(fi, oldstate);
688 			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
689 		}
690 	}
691 }
692 
693 /**
 694  * Cleanup helper for chx_fail and chx_stopped:
 695  * clean up the channel's queues and notify the interface statemachine.
696  *
697  * fi		An instance of a channel statemachine.
698  * state	The next state (depending on caller).
699  * ch		The channel to operate on.
700  */
701 static void ctcm_chx_cleanup(fsm_instance *fi, int state,
702 		struct channel *ch)
703 {
704 	struct net_device *dev = ch->netdev;
705 	struct ctcm_priv *priv = dev->ml_priv;
706 
707 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
708 			"%s(%s): %s[%d]\n",
709 			CTCM_FUNTAIL, dev->name, ch->id, state);
710 
711 	fsm_deltimer(&ch->timer);
712 	if (IS_MPC(ch))
713 		fsm_deltimer(&ch->sweep_timer);
714 
715 	fsm_newstate(fi, state);
716 	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
717 		clear_normalized_cda(&ch->ccw[1]);
718 		dev_kfree_skb_any(ch->trans_skb);
719 		ch->trans_skb = NULL;
720 	}
721 
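	/* Reset transport header (TH) segment and sequence tracking. */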
722 	ch->th_seg = 0x00;
723 	ch->th_seq_num = 0x00;
724 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
725 		skb_queue_purge(&ch->io_queue);
726 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
727 	} else {
728 		ctcm_purge_skb_queue(&ch->io_queue);
729 		if (IS_MPC(ch))
730 			ctcm_purge_skb_queue(&ch->sweep_queue);
731 		spin_lock(&ch->collect_lock);
732 		ctcm_purge_skb_queue(&ch->collect_queue);
733 		ch->collect_len = 0;
734 		spin_unlock(&ch->collect_lock);
735 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
736 	}
737 }
738 
739 /**
740  * A channel has successfully been halted.
 741  * Clean up its queues and notify the interface statemachine.
742  *
743  * fi		An instance of a channel statemachine.
744  * event	The event, just happened.
 745  * arg		Generic pointer, cast from channel * upon call.
746  */
747 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
748 {
749 	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
750 }
751 
752 /**
 753  * A stop command from the device statemachine arrived while we are
 754  * not operational. Set the state to stopped.
755  *
756  * fi		An instance of a channel statemachine.
757  * event	The event, just happened.
 758  * arg		Generic pointer, cast from channel * upon call.
759  */
760 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
761 {
762 	fsm_newstate(fi, CTC_STATE_STOPPED);
763 }
764 
765 /**
 766  * A machine check for no path, a not-operational status or a gone device
 767  * has happened.
 768  * Clean up the queue and notify the interface statemachine.
769  *
770  * fi		An instance of a channel statemachine.
771  * event	The event, just happened.
 772  * arg		Generic pointer, cast from channel * upon call.
773  */
774 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
775 {
776 	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
777 }
778 
779 /**
780  * Handle error during setup of channel.
781  *
782  * fi		An instance of a channel statemachine.
783  * event	The event, just happened.
 784  * arg		Generic pointer, cast from channel * upon call.
785  */
786 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
787 {
788 	struct channel *ch = arg;
789 	struct net_device *dev = ch->netdev;
790 	struct ctcm_priv *priv = dev->ml_priv;
791 
792 	/*
793 	 * Special case: Got UC_RCRESET on setmode.
 794 	 * This means that the remote side isn't set up yet. In this case
 795 	 * simply retry after a few seconds (CTCM_TIME_5_SEC)...
796 	 */
797 	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
798 	    ((event == CTC_EVENT_UC_RCRESET) ||
799 	     (event == CTC_EVENT_UC_RSRESET))) {
800 		fsm_newstate(fi, CTC_STATE_STARTRETRY);
801 		fsm_deltimer(&ch->timer);
802 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
803 		if (!IS_MPC(ch) &&
804 		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
805 			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
806 			if (rc != 0)
807 				ctcm_ccw_check_rc(ch, rc,
808 					"HaltIO in chx_setuperr");
809 		}
810 		return;
811 	}
812 
813 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
814 		"%s(%s) : %s error during %s channel setup state=%s\n",
815 		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
816 		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
817 		fsm_getstate_str(fi));
818 
819 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
820 		fsm_newstate(fi, CTC_STATE_RXERR);
821 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
822 	} else {
823 		fsm_newstate(fi, CTC_STATE_TXERR);
824 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
825 	}
826 }
827 
828 /**
829  * Restart a channel after an error.
830  *
831  * fi		An instance of a channel statemachine.
832  * event	The event, just happened.
 833  * arg		Generic pointer, cast from channel * upon call.
834  */
835 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
836 {
837 	struct channel *ch = arg;
838 	struct net_device *dev = ch->netdev;
839 	unsigned long saveflags = 0;
840 	int oldstate;
841 	int rc;
842 
843 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
844 		"%s: %s[%d] of %s\n",
845 			CTCM_FUNTAIL, ch->id, event, dev->name);
846 
847 	fsm_deltimer(&ch->timer);
848 
849 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
850 	oldstate = fsm_getstate(fi);
851 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
852 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
853 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
854 			/* Such conditional locking is a known problem for
 855 			 * sparse because it is nondeterministic in static view.
856 			 * Warnings should be ignored here. */
857 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
858 	if (event == CTC_EVENT_TIMER)
859 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
860 	if (rc != 0) {
861 		if (rc != -EBUSY) {
 862 			fsm_deltimer(&ch->timer);
 863 			fsm_newstate(fi, oldstate);
864 		}
865 		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
866 	}
867 }
868 
869 /**
 870  * Handle error during RX initial handshake (exchange of
 871  * a zero-length block header).
872  *
873  * fi		An instance of a channel statemachine.
874  * event	The event, just happened.
 875  * arg		Generic pointer, cast from channel * upon call.
876  */
877 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
878 {
879 	struct channel *ch = arg;
880 	struct net_device *dev = ch->netdev;
881 	struct ctcm_priv *priv = dev->ml_priv;
882 
883 	if (event == CTC_EVENT_TIMER) {
884 		if (!IS_MPCDEV(dev))
885 			/* TODO : check if MPC deletes timer somewhere */
886 			fsm_deltimer(&ch->timer);
887 		if (ch->retry++ < 3)
888 			ctcm_chx_restart(fi, event, arg);
889 		else {
890 			fsm_newstate(fi, CTC_STATE_RXERR);
891 			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
892 		}
893 	} else {
894 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
895 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
896 			ctc_ch_event_names[event], fsm_getstate_str(fi));
897 
898 		dev_warn(&dev->dev,
899 			"Initialization failed with RX/TX init handshake "
900 			"error %s\n", ctc_ch_event_names[event]);
901 	}
902 }
903 
904 /**
905  * Notify device statemachine if we gave up initialization
906  * of RX channel.
907  *
908  * fi		An instance of a channel statemachine.
909  * event	The event, just happened.
 910  * arg		Generic pointer, cast from channel * upon call.
911  */
912 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
913 {
914 	struct channel *ch = arg;
915 	struct net_device *dev = ch->netdev;
916 	struct ctcm_priv *priv = dev->ml_priv;
917 
918 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
919 			"%s(%s): RX %s busy, init. fail",
920 				CTCM_FUNTAIL, dev->name, ch->id);
921 	fsm_newstate(fi, CTC_STATE_RXERR);
922 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
923 }
924 
925 /**
926  * Handle RX Unit check remote reset (remote disconnected)
927  *
928  * fi		An instance of a channel statemachine.
929  * event	The event, just happened.
 930  * arg		Generic pointer, cast from channel * upon call.
931  */
932 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
933 {
934 	struct channel *ch = arg;
935 	struct channel *ch2;
936 	struct net_device *dev = ch->netdev;
937 	struct ctcm_priv *priv = dev->ml_priv;
938 
939 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
940 			"%s: %s: remote disconnect - re-init ...",
941 				CTCM_FUNTAIL, dev->name);
942 	fsm_deltimer(&ch->timer);
943 	/*
944 	 * Notify device statemachine
945 	 */
946 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
947 	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
948 
949 	fsm_newstate(fi, CTC_STATE_DTERM);
950 	ch2 = priv->channel[CTCM_WRITE];
951 	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
952 
953 	ccw_device_halt(ch->cdev, (unsigned long)ch);
954 	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
955 }
956 
957 /**
958  * Handle error during TX channel initialization.
959  *
960  * fi		An instance of a channel statemachine.
961  * event	The event, just happened.
 962  * arg		Generic pointer, cast from channel * upon call.
963  */
964 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
965 {
966 	struct channel *ch = arg;
967 	struct net_device *dev = ch->netdev;
968 	struct ctcm_priv *priv = dev->ml_priv;
969 
970 	if (event == CTC_EVENT_TIMER) {
971 		fsm_deltimer(&ch->timer);
972 		if (ch->retry++ < 3)
973 			ctcm_chx_restart(fi, event, arg);
974 		else {
975 			fsm_newstate(fi, CTC_STATE_TXERR);
976 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
977 		}
978 	} else {
979 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
980 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
981 			ctc_ch_event_names[event], fsm_getstate_str(fi));
982 
983 		dev_warn(&dev->dev,
984 			"Initialization failed with RX/TX init handshake "
985 			"error %s\n", ctc_ch_event_names[event]);
986 	}
987 }
988 
989 /**
990  * Handle TX timeout by retrying operation.
991  *
992  * fi		An instance of a channel statemachine.
993  * event	The event, just happened.
 994  * arg		Generic pointer, cast from channel * upon call.
995  */
996 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
997 {
998 	struct channel *ch = arg;
999 	struct net_device *dev = ch->netdev;
1000 	struct ctcm_priv *priv = dev->ml_priv;
1001 	struct sk_buff *skb;
1002 
1003 	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1004 			__func__, smp_processor_id(), ch, ch->id);
1005 
1006 	fsm_deltimer(&ch->timer);
1007 	if (ch->retry++ > 3) {
1008 		struct mpc_group *gptr = priv->mpcg;
1009 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1010 				"%s: %s: retries exceeded",
1011 					CTCM_FUNTAIL, ch->id);
1012 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1013 		/* call restart if not MPC or if MPC and mpcg fsm is ready.
1014 			use gptr as mpc indicator */
1015 		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1016 			ctcm_chx_restart(fi, event, arg);
1017 				goto done;
1018 	}
1019 
1020 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1021 			"%s : %s: retry %d",
1022 				CTCM_FUNTAIL, ch->id, ch->retry);
1023 	skb = skb_peek(&ch->io_queue);
1024 	if (skb) {
1025 		int rc = 0;
1026 		unsigned long saveflags = 0;
1027 		clear_normalized_cda(&ch->ccw[4]);
1028 		ch->ccw[4].count = skb->len;
1029 		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1030 			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1031 				"%s: %s: IDAL alloc failed",
1032 						CTCM_FUNTAIL, ch->id);
1033 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1034 			ctcm_chx_restart(fi, event, arg);
1035 				goto done;
1036 		}
1037 		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1038 		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1039 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1040 			/* Such conditional locking is a known problem for
 1041 			 * sparse because it is nondeterministic in static view.
1042 			 * Warnings should be ignored here. */
1043 		if (do_debug_ccw)
1044 			ctcmpc_dumpit((char *)&ch->ccw[3],
1045 					sizeof(struct ccw1) * 3);
1046 
1047 		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1048 						(unsigned long)ch, 0xff, 0);
1049 		if (event == CTC_EVENT_TIMER)
1050 			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1051 					saveflags);
1052 		if (rc != 0) {
1053 			fsm_deltimer(&ch->timer);
1054 			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1055 			ctcm_purge_skb_queue(&ch->io_queue);
1056 		}
1057 	}
1058 done:
1059 	return;
1060 }
1061 
1062 /**
1063  * Handle fatal errors during an I/O command.
1064  *
1065  * fi		An instance of a channel statemachine.
1066  * event	The event, just happened.
 1067  * arg		Generic pointer, cast from channel * upon call.
1068  */
1069 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1070 {
1071 	struct channel *ch = arg;
1072 	struct net_device *dev = ch->netdev;
1073 	struct ctcm_priv *priv = dev->ml_priv;
1074 	int rd = CHANNEL_DIRECTION(ch->flags);
1075 
1076 	fsm_deltimer(&ch->timer);
1077 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1078 		"%s: %s: %s unrecoverable channel error",
1079 			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1080 
1081 	if (IS_MPC(ch)) {
1082 		priv->stats.tx_dropped++;
1083 		priv->stats.tx_errors++;
1084 	}
1085 	if (rd == CTCM_READ) {
1086 		fsm_newstate(fi, CTC_STATE_RXERR);
1087 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1088 	} else {
1089 		fsm_newstate(fi, CTC_STATE_TXERR);
1090 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1091 	}
1092 }
1093 
1094 /*
1095  * The ctcm statemachine for a channel.
1096  */
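/* Each entry maps a (state, event) pair to the action that handles it. */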
1097 const fsm_node ch_fsm[] = {
1098 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1099 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1100 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1101 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1102 
1103 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1104 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1105 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1106 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1107 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1108 
1109 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1110 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1111 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1112 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1113 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1114 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1115 
1116 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1117 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1118 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1119 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1120 
1121 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1122 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1123 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
1124 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1125 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1126 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1127 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1128 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1129 
1130 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1131 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1132 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
1133 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1134 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1135 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1136 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1137 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1138 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
1139 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1140 
1141 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1142 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1143 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
1144 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1145 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1146 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1147 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
1148 
1149 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1150 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1151 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1152 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
1153 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
1154 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
1155 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1156 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1157 
1158 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1159 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1160 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
1161 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1162 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1163 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1164 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1165 
1166 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
1167 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1168 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
1169 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1170 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1171 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1172 
1173 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1174 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1175 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1176 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1177 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1178 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1179 
1180 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
1181 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
1182 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
1183 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
1184 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
1185 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
1186 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1187 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1188 
1189 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1190 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1191 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1192 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1193 };
1194 
1195 int ch_fsm_len = ARRAY_SIZE(ch_fsm);
1196 
1197 /*
 1198  * MPC actions for the mpc channel statemachine:
 1199  * handling of the MPC protocol requires an extra
 1200  * statemachine and extra actions, which are prefixed ctcmpc_.
 1201  * The ctc_ch_states and ctc_ch_state_names,
 1202  * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 1203  * which are extended by some MPC-specific elements.
1204  */
1205 
1206 /*
1207  * Actions for mpc channel statemachine.
1208  */
1209 
1210 /**
 1211  * Normal data has been sent. Free the corresponding
1212  * skb (it's in io_queue), reset dev->tbusy and
1213  * revert to idle state.
1214  *
1215  * fi		An instance of a channel statemachine.
1216  * event	The event, just happened.
 1217  * arg		Generic pointer, cast from channel * upon call.
1218  */
1219 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1220 {
1221 	struct channel		*ch = arg;
1222 	struct net_device	*dev = ch->netdev;
1223 	struct ctcm_priv	*priv = dev->ml_priv;
1224 	struct mpc_group	*grp = priv->mpcg;
1225 	struct sk_buff		*skb;
1226 	int		first = 1;
1227 	int		i;
1228 	__u32		data_space;
1229 	unsigned long	duration;
1230 	struct sk_buff	*peekskb;
1231 	int		rc;
1232 	struct th_header *header;
1233 	struct pdu	*p_header;
1234 	struct timespec done_stamp = current_kernel_time(); /* xtime */
1235 
1236 	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1237 			__func__, dev->name, smp_processor_id());
1238 
1239 	duration =
1240 		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
1241 		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1242 	if (duration > ch->prof.tx_time)
1243 		ch->prof.tx_time = duration;
1244 
1245 	if (ch->irb->scsw.cmd.count != 0)
1246 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1247 			"%s(%s): TX not complete, remaining %d bytes",
1248 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1249 	fsm_deltimer(&ch->timer);
1250 	while ((skb = skb_dequeue(&ch->io_queue))) {
1251 		priv->stats.tx_packets++;
1252 		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1253 		if (first) {
1254 			priv->stats.tx_bytes += 2;
1255 			first = 0;
1256 		}
1257 		atomic_dec(&skb->users);
1258 		dev_kfree_skb_irq(skb);
1259 	}
1260 	spin_lock(&ch->collect_lock);
1261 	clear_normalized_cda(&ch->ccw[4]);
1262 	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1263 		spin_unlock(&ch->collect_lock);
1264 		fsm_newstate(fi, CTC_STATE_TXIDLE);
1265 				goto done;
1266 	}
1267 
1268 	if (ctcm_checkalloc_buffer(ch)) {
1269 		spin_unlock(&ch->collect_lock);
1270 				goto done;
1271 	}
1272 	ch->trans_skb->data = ch->trans_skb_data;
1273 	skb_reset_tail_pointer(ch->trans_skb);
1274 	ch->trans_skb->len = 0;
1275 	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1276 		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1277 	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1278 		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1279 	i = 0;
1280 	p_header = NULL;
1281 	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1282 
1283 	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1284 		       " data_space:%04x\n",
1285 		       __func__, data_space);
1286 
1287 	while ((skb = skb_dequeue(&ch->collect_queue))) {
1288 		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1289 		p_header = (struct pdu *)
1290 			(skb_tail_pointer(ch->trans_skb) - skb->len);
1291 		p_header->pdu_flag = 0x00;
1292 		if (skb->protocol == ntohs(ETH_P_SNAP))
1293 			p_header->pdu_flag |= 0x60;
1294 		else
1295 			p_header->pdu_flag |= 0x20;
1296 
1297 		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1298 				__func__, ch->trans_skb->len);
1299 		CTCM_PR_DBGDATA("%s: pdu header and data for up"
1300 				" to 32 bytes sent to vtam\n", __func__);
1301 		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1302 
1303 		ch->collect_len -= skb->len;
1304 		data_space -= skb->len;
1305 		priv->stats.tx_packets++;
1306 		priv->stats.tx_bytes += skb->len;
1307 		atomic_dec(&skb->users);
1308 		dev_kfree_skb_any(skb);
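		/* Stop collecting once the queue is drained or the next skb
		 * no longer fits into the remaining buffer space. */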
1309 		peekskb = skb_peek(&ch->collect_queue);
 1310 		if (!peekskb || peekskb->len > data_space)
1311 			break;
1312 		i++;
1313 	}
1314 	/* p_header points to the last one we handled */
1315 	if (p_header)
1316 		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
1317 	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1318 	if (!header) {
1319 		spin_unlock(&ch->collect_lock);
1320 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1321 				goto done;
1322 	}
1323 	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
1324 	ch->th_seq_num++;
1325 	header->th_seq_num = ch->th_seq_num;
1326 
1327 	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1328 					__func__, ch->th_seq_num);
1329 
1330 	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1331 		TH_HEADER_LENGTH);	/* put the TH on the packet */
1332 
1333 	kfree(header);
1334 
1335 	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1336 		       __func__, ch->trans_skb->len);
1337 	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1338 			"data to vtam from collect_q\n", __func__);
1339 	CTCM_D3_DUMP((char *)ch->trans_skb->data,
1340 				min_t(int, ch->trans_skb->len, 50));
1341 
1342 	spin_unlock(&ch->collect_lock);
1343 	clear_normalized_cda(&ch->ccw[1]);
1344 	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1345 		dev_kfree_skb_any(ch->trans_skb);
1346 		ch->trans_skb = NULL;
1347 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1348 			"%s: %s: IDAL alloc failed",
1349 				CTCM_FUNTAIL, ch->id);
1350 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1351 		return;
1352 	}
1353 	ch->ccw[1].count = ch->trans_skb->len;
1354 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1355 	ch->prof.send_stamp = current_kernel_time(); /* xtime */
1356 	if (do_debug_ccw)
1357 		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1358 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1359 					(unsigned long)ch, 0xff, 0);
1360 	ch->prof.doios_multi++;
1361 	if (rc != 0) {
1362 		priv->stats.tx_dropped += i;
1363 		priv->stats.tx_errors += i;
1364 		fsm_deltimer(&ch->timer);
1365 		ctcm_ccw_check_rc(ch, rc, "chained TX");
1366 	}
1367 done:
1368 	ctcm_clear_busy(dev);
1369 	return;
1370 }
1371 
1372 /**
 1373  * Got normal data, check for sanity, queue it up, allocate a new buffer,
 1374  * trigger bottom half, and initiate next read.
1375  *
1376  * fi		An instance of a channel statemachine.
1377  * event	The event, just happened.
 1378  * arg		Generic pointer, cast from channel * upon call.
1379  */
1380 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1381 {
1382 	struct channel		*ch = arg;
1383 	struct net_device	*dev = ch->netdev;
1384 	struct ctcm_priv	*priv = dev->ml_priv;
1385 	struct mpc_group	*grp = priv->mpcg;
1386 	struct sk_buff		*skb = ch->trans_skb;
1387 	struct sk_buff		*new_skb;
1388 	unsigned long		saveflags = 0;	/* avoids compiler warning */
1389 	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
1390 
1391 	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1392 			CTCM_FUNTAIL, dev->name, smp_processor_id(),
1393 				ch->id, ch->max_bufsize, len);
1394 	fsm_deltimer(&ch->timer);
1395 
1396 	if (skb == NULL) {
1397 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1398 			"%s(%s): TRANS_SKB = NULL",
1399 				CTCM_FUNTAIL, dev->name);
1400 			goto again;
1401 	}
1402 
1403 	if (len < TH_HEADER_LENGTH) {
1404 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
 1405 				"%s(%s): packet length %d too short",
1406 					CTCM_FUNTAIL, dev->name, len);
1407 		priv->stats.rx_dropped++;
1408 		priv->stats.rx_length_errors++;
1409 	} else {
1410 		/* must have valid th header or game over */
1411 		__u32	block_len = len;
1412 		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1413 		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1414 
1415 		if (new_skb == NULL) {
1416 			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
 1417 				"%s(%s): skb allocation failed",
1418 						CTCM_FUNTAIL, dev->name);
1419 			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1420 					goto again;
1421 		}
1422 		switch (fsm_getstate(grp->fsm)) {
1423 		case MPCG_STATE_RESET:
1424 		case MPCG_STATE_INOP:
1425 			dev_kfree_skb_any(new_skb);
1426 			break;
1427 		case MPCG_STATE_FLOWC:
1428 		case MPCG_STATE_READY:
1429 			memcpy(skb_put(new_skb, block_len),
1430 					       skb->data, block_len);
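			/* Queue the raw block and let the channel tasklet
			 * (bottom half) unpack it. */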
1431 			skb_queue_tail(&ch->io_queue, new_skb);
1432 			tasklet_schedule(&ch->ch_tasklet);
1433 			break;
1434 		default:
1435 			memcpy(skb_put(new_skb, len), skb->data, len);
1436 			skb_queue_tail(&ch->io_queue, new_skb);
1437 			tasklet_hi_schedule(&ch->ch_tasklet);
1438 			break;
1439 		}
1440 	}
1441 
1442 again:
1443 	switch (fsm_getstate(grp->fsm)) {
1444 	int rc, dolock;
1445 	case MPCG_STATE_FLOWC:
1446 	case MPCG_STATE_READY:
1447 		if (ctcm_checkalloc_buffer(ch))
1448 			break;
1449 		ch->trans_skb->data = ch->trans_skb_data;
1450 		skb_reset_tail_pointer(ch->trans_skb);
1451 		ch->trans_skb->len = 0;
1452 		ch->ccw[1].count = ch->max_bufsize;
 1453 		if (do_debug_ccw)
 1454 			ctcmpc_dumpit((char *)&ch->ccw[0],
 1455 					sizeof(struct ccw1) * 3);
1456 		dolock = !in_irq();
1457 		if (dolock)
1458 			spin_lock_irqsave(
1459 				get_ccwdev_lock(ch->cdev), saveflags);
1460 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1461 						(unsigned long)ch, 0xff, 0);
1462 		if (dolock) /* see remark about conditional locking */
1463 			spin_unlock_irqrestore(
1464 				get_ccwdev_lock(ch->cdev), saveflags);
1465 		if (rc != 0)
1466 			ctcm_ccw_check_rc(ch, rc, "normal RX");
1467 	default:
1468 		break;
1469 	}
1470 
1471 	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1472 			__func__, dev->name, ch, ch->id);
1473 
1474 }
1475 
1476 /**
 1477  * Initial I/O handling: move the channel into its initial RX/TX state.
1478  *
1479  * fi		An instance of a channel statemachine.
1480  * event	The event, just happened.
 1481  * arg		Generic pointer, cast from channel * upon call.
1482  */
1483 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1484 {
1485 	struct channel		*ch = arg;
1486 	struct net_device	*dev = ch->netdev;
1487 	struct ctcm_priv	*priv = dev->ml_priv;
1488 	struct mpc_group	*gptr = priv->mpcg;
1489 
1490 	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1491 				__func__, ch->id, ch);
1492 
1493 	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1494 			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1495 			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1496 			fsm_getstate(gptr->fsm), ch->protocol);
1497 
1498 	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1499 		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1500 
1501 	fsm_deltimer(&ch->timer);
1502 	if (ctcm_checkalloc_buffer(ch))
1503 				goto done;
1504 
1505 	switch (fsm_getstate(fi)) {
1506 	case CTC_STATE_STARTRETRY:
1507 	case CTC_STATE_SETUPWAIT:
1508 		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1509 			ctcmpc_chx_rxidle(fi, event, arg);
1510 		} else {
1511 			fsm_newstate(fi, CTC_STATE_TXIDLE);
1512 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1513 		}
1514 				goto done;
1515 	default:
1516 		break;
 1517 	}
1518 
1519 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1520 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1521 
1522 done:
1523 	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1524 				__func__, ch->id, ch);
1525 	return;
1526 }
1527 
1528 /**
1529  * Got initial data, check it. If OK,
1530  * notify device statemachine that we are up and
1531  * running.
1532  *
1533  * fi		An instance of a channel statemachine.
1534  * event	The event, just happened.
 1535  * arg		Generic pointer, cast from channel * upon call.
1536  */
1537 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1538 {
1539 	struct channel *ch = arg;
1540 	struct net_device *dev = ch->netdev;
1541 	struct ctcm_priv  *priv = dev->ml_priv;
1542 	struct mpc_group  *grp = priv->mpcg;
1543 	int rc;
1544 	unsigned long saveflags = 0;	/* avoids compiler warning */
1545 
1546 	fsm_deltimer(&ch->timer);
1547 	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1548 			__func__, ch->id, dev->name, smp_processor_id(),
1549 				fsm_getstate(fi), fsm_getstate(grp->fsm));
1550 
1551 	fsm_newstate(fi, CTC_STATE_RXIDLE);
1552 	/* XID processing complete */
1553 
1554 	switch (fsm_getstate(grp->fsm)) {
1555 	case MPCG_STATE_FLOWC:
1556 	case MPCG_STATE_READY:
1557 		if (ctcm_checkalloc_buffer(ch))
1558 				goto done;
1559 		ch->trans_skb->data = ch->trans_skb_data;
1560 		skb_reset_tail_pointer(ch->trans_skb);
1561 		ch->trans_skb->len = 0;
1562 		ch->ccw[1].count = ch->max_bufsize;
1563 		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1564 		if (event == CTC_EVENT_START)
1565 			/* see remark about conditional locking */
1566 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1567 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1568 						(unsigned long)ch, 0xff, 0);
1569 		if (event == CTC_EVENT_START)
1570 			spin_unlock_irqrestore(
1571 					get_ccwdev_lock(ch->cdev), saveflags);
1572 		if (rc != 0) {
1573 			fsm_newstate(fi, CTC_STATE_RXINIT);
1574 			ctcm_ccw_check_rc(ch, rc, "initial RX");
1575 				goto done;
1576 		}
1577 		break;
1578 	default:
1579 		break;
1580 	}
1581 
1582 	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1583 done:
1584 	return;
1585 }
1586 
1587 /*
1588  * ctcmpc channel FSM action
1589  * called from several points in ctcmpc_ch_fsm
1590  * ctcmpc only
1591  */
1592 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1593 {
1594 	struct channel	  *ch     = arg;
1595 	struct net_device *dev    = ch->netdev;
1596 	struct ctcm_priv  *priv   = dev->ml_priv;
1597 	struct mpc_group  *grp = priv->mpcg;
1598 
1599 	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1600 		__func__, dev->name, ch->id, ch, smp_processor_id(),
1601 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1602 
1603 	switch (fsm_getstate(grp->fsm)) {
1604 	case MPCG_STATE_XID2INITW:
1605 		/* ok..start yside xid exchanges */
1606 		if (!ch->in_mpcgroup)
1607 			break;
1608 		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
1609 			fsm_deltimer(&grp->timer);
1610 			fsm_addtimer(&grp->timer,
1611 				MPC_XID_TIMEOUT_VALUE,
1612 				MPCG_EVENT_TIMER, dev);
1613 			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1614 
1615 		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* ATTN received before XID0 was processed via the bottom half */
1617 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1618 		break;
1619 	case MPCG_STATE_XID2INITX:
1620 	case MPCG_STATE_XID0IOWAIT:
1621 	case MPCG_STATE_XID0IOWAIX:
		/* ATTN received before XID0 was processed on this channel,
		 * but the group is in the middle of XID0 processing.
		 */
1624 		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1625 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1626 		break;
1627 	case MPCG_STATE_XID7INITW:
1628 	case MPCG_STATE_XID7INITX:
1629 	case MPCG_STATE_XID7INITI:
1630 	case MPCG_STATE_XID7INITZ:
1631 		switch (fsm_getstate(ch->fsm)) {
1632 		case CH_XID7_PENDING:
1633 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1634 			break;
1635 		case CH_XID7_PENDING2:
1636 			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1637 			break;
1638 		}
1639 		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1640 		break;
1641 	}
1642 
1643 	return;
1644 }
1645 
1646 /*
1647  * ctcmpc channel FSM action
1648  * called from one point in ctcmpc_ch_fsm
1649  * ctcmpc only
1650  */
1651 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1652 {
1653 	struct channel	  *ch     = arg;
1654 	struct net_device *dev    = ch->netdev;
1655 	struct ctcm_priv  *priv   = dev->ml_priv;
1656 	struct mpc_group  *grp    = priv->mpcg;
1657 
1658 	CTCM_PR_DEBUG("%s(%s): %s\n  ChState:%s GrpState:%s\n",
1659 			__func__, dev->name, ch->id,
1660 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1661 
1662 	fsm_deltimer(&ch->timer);
1663 
1664 	switch (fsm_getstate(grp->fsm)) {
1665 	case MPCG_STATE_XID0IOWAIT:
		/* VTAM wants to be primary: start yside XID exchanges.
		 * Only one attn-busy is received at a time, so the state
		 * must not be changed on each occurrence.
		 */
1669 		grp->changed_side = 1;
1670 		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1671 		break;
1672 	case MPCG_STATE_XID2INITW:
1673 		if (grp->changed_side == 1) {
1674 			grp->changed_side = 2;
1675 			break;
1676 		}
		/* The process began via a call to establish_conn,
		 * so failure must be reported instead of reverting
		 * back to the ready-for-xid passive state.
		 */
		if (grp->estconnfunc)
			goto done;
		/* This attnbusy is NOT the result of xside XID
		 * collisions, so yside must have been triggered
		 * by an ATTN that was not intended to start XID
		 * processing. Revert back to ready-for-xid and
		 * wait for an ATTN interrupt to signal XID start.
		 */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN BUSY for the second
		 * channel. Send yside XID for the second channel.
		 */
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* fall through */
1702 	case MPCG_STATE_XID0IOWAIX:
1703 	case MPCG_STATE_XID7INITW:
1704 	case MPCG_STATE_XID7INITX:
1705 	case MPCG_STATE_XID7INITI:
1706 	case MPCG_STATE_XID7INITZ:
1707 	default:
		/* Multiple attn-busy indications mean the two sides are too
		 * far out of sync; they are certainly not being received as
		 * part of valid MPC group negotiations.
		 */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
1713 	}
1714 
1715 	if (grp->changed_side == 1) {
1716 		fsm_deltimer(&grp->timer);
1717 		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1718 			     MPCG_EVENT_TIMER, dev);
1719 	}
1720 	if (ch->in_mpcgroup)
1721 		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1722 	else
1723 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1724 			"%s(%s): channel %s not added to group",
1725 				CTCM_FUNTAIL, dev->name, ch->id);
1726 
1727 done:
1728 	return;
1729 }
1730 
1731 /*
1732  * ctcmpc channel FSM action
1733  * called from several points in ctcmpc_ch_fsm
1734  * ctcmpc only
1735  */
1736 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1737 {
1738 	struct channel	   *ch	   = arg;
1739 	struct net_device  *dev    = ch->netdev;
1740 	struct ctcm_priv   *priv   = dev->ml_priv;
1741 	struct mpc_group   *grp    = priv->mpcg;
1742 
1743 	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1744 	return;
1745 }
1746 
1747 /*
1748  * ctcmpc channel FSM action
1749  * called from several points in ctcmpc_ch_fsm
1750  * ctcmpc only
1751  */
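/*
 * Dequeues the next TH sweep request/response frame from the write
 * channel's sweep queue and starts the I/O for it once that channel is
 * idle; when no sweep frames remain outstanding, the sweep ends and the
 * TH sequence counters are reset.
 */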
1752 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1753 {
1754 	struct channel *ach = arg;
1755 	struct net_device *dev = ach->netdev;
1756 	struct ctcm_priv *priv = dev->ml_priv;
1757 	struct mpc_group *grp = priv->mpcg;
1758 	struct channel *wch = priv->channel[CTCM_WRITE];
1759 	struct channel *rch = priv->channel[CTCM_READ];
1760 	struct sk_buff *skb;
1761 	struct th_sweep *header;
1762 	int rc = 0;
1763 	unsigned long saveflags = 0;
1764 
1765 	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1766 			__func__, smp_processor_id(), ach, ach->id);
1767 
1768 	if (grp->in_sweep == 0)
		goto done;
1770 
1771 	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
1772 				__func__, wch->th_seq_num);
1773 	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
1774 				__func__, rch->th_seq_num);
1775 
1776 	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1777 		/* give the previous IO time to complete */
1778 		fsm_addtimer(&wch->sweep_timer,
1779 			200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
1781 	}
1782 
1783 	skb = skb_dequeue(&wch->sweep_queue);
1784 	if (!skb)
		goto done;
1786 
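	/*
	 * set_normalized_cda() points the CCW at the sweep skb, building an
	 * IDAL if the buffer requires one; if that fails, abort the sweep
	 * and mark the MPC group inoperative.
	 */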
1787 	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1788 		grp->in_sweep = 0;
1789 		ctcm_clear_busy_do(dev);
1790 		dev_kfree_skb_any(skb);
1791 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
1793 	} else {
1794 		atomic_inc(&skb->users);
1795 		skb_queue_tail(&wch->io_queue, skb);
1796 	}
1797 
1798 	/* send out the sweep */
1799 	wch->ccw[4].count = skb->len;
1800 
1801 	header = (struct th_sweep *)skb->data;
1802 	switch (header->th.th_ch_flag) {
1803 	case TH_SWEEP_REQ:
1804 		grp->sweep_req_pend_num--;
1805 		break;
1806 	case TH_SWEEP_RESP:
1807 		grp->sweep_rsp_pend_num--;
1808 		break;
1809 	}
1810 
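	/* Record the last TH sequence number sent on the write channel in
	 * the sweep frame so that both sides can resynchronize their
	 * sequence counters.
	 */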
1811 	header->sw.th_last_seq = wch->th_seq_num;
1812 
1813 	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1814 	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1815 	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1816 
1817 	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1818 	fsm_newstate(wch->fsm, CTC_STATE_TX);
1819 
1820 	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1821 	wch->prof.send_stamp = current_kernel_time(); /* xtime */
1822 	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1823 					(unsigned long) wch, 0xff, 0);
1824 	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1825 
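	/* Once no sweep requests or responses remain outstanding the sweep
	 * is complete: reset both sequence counters and clear the busy
	 * state of the device.
	 */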
1826 	if ((grp->sweep_req_pend_num == 0) &&
1827 	   (grp->sweep_rsp_pend_num == 0)) {
1828 		grp->in_sweep = 0;
1829 		rch->th_seq_num = 0x00;
1830 		wch->th_seq_num = 0x00;
1831 		ctcm_clear_busy_do(dev);
1832 	}
1833 
1834 	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
1835 			__func__, wch->th_seq_num, rch->th_seq_num);
1836 
1837 	if (rc != 0)
1838 		ctcm_ccw_check_rc(wch, rc, "send sweep");
1839 
1840 done:
1841 	return;
1842 }
1843 
1844 
1845 /*
1846  * The ctcmpc statemachine for a channel.
1847  */
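/*
 * Each fsm_node entry below maps a (channel state, event) pair to the
 * action routine that handles it; pairs without an entry are left to
 * the generic fsm code, which simply reports them and takes no action.
 */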
1848 
1849 const fsm_node ctcmpc_ch_fsm[] = {
1850 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1851 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1852 	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1853 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1854 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1855 
1856 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1857 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1858 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1859 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1860 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1861 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop  },
1862 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop  },
1863 	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1864 
1865 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1866 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1867 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1868 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1869 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1870 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1871 
1872 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1873 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1874 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1875 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1876 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1877 
1878 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1879 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1880 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
1881 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1882 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1883 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1884 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1885 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1886 
1887 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1888 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1889 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle  },
1890 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1891 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1892 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1893 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1894 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1895 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio  },
1896 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1897 
1898 	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1899 	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1900 	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1901 	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1902 	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1903 	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1904 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1905 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1907 	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1908 
1909 	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1910 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1911 	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1912 	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop  },
1913 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1914 	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1915 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1916 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
1917 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy  },
1918 	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1919 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1920 
1921 	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1922 	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1923 	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1924 	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1925 	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1926 	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1927 	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1928 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1929 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1931 	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1932 	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1933 	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1934 
1935 	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1936 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1937 	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1938 	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop  },
1939 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1940 	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1941 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1942 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1943 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1944 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1945 	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1946 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1947 
1948 	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1949 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1950 	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1951 	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop  },
1952 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1953 	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1954 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1955 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1956 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1957 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1958 	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1959 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1960 
1961 	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1962 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1963 	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1964 	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop  },
1965 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1966 	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1967 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1968 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1969 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1970 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1971 	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1972 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1973 
1974 	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1975 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1976 	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1977 	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop  },
1978 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1979 	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1980 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1981 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1982 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1983 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1984 	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1985 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1986 
1987 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1988 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1989 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1990 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1991 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
1992 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1993 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1994 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1995 
1996 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1997 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1998 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1999 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
2000 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
2001 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
2002 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2003 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2004 	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2005 
2006 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2007 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
2008 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
2009 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2010 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2011 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2012 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2013 	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2014 
2015 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
2016 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2017 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
2018 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2019 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2020 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2021 	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2022 	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2023 
2024 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2025 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2026 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
2027 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2028 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2029 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2030 	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2031 
2032 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
2033 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
2034 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone  },
2035 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2036 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2037 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
2038 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2039 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2040 	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2041 	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2042 
2043 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2044 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2045 	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2046 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2047 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2048 };
2049 
2050 int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2051 
2052 /*
2053  * Actions for interface - statemachine.
2054  */
2055 
/**
 * Start up the channels by sending CTC_EVENT_START to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
2063 static void dev_action_start(fsm_instance *fi, int event, void *arg)
2064 {
2065 	struct net_device *dev = arg;
2066 	struct ctcm_priv *priv = dev->ml_priv;
2067 	int direction;
2068 
2069 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2070 
2071 	fsm_deltimer(&priv->restart_timer);
2072 	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2073 	if (IS_MPC(priv))
2074 		priv->mpcg->channels_terminating = 0;
2075 	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2076 		struct channel *ch = priv->channel[direction];
2077 		fsm_event(ch->fsm, CTC_EVENT_START, ch);
2078 	}
2079 }
2080 
/**
 * Shut down the channels by sending CTC_EVENT_STOP to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
2088 static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2089 {
2090 	int direction;
2091 	struct net_device *dev = arg;
2092 	struct ctcm_priv *priv = dev->ml_priv;
2093 
2094 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2095 
2096 	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2097 	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2098 		struct channel *ch = priv->channel[direction];
2099 		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2100 		ch->th_seq_num = 0x00;
2101 		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2102 				__func__, ch->th_seq_num);
2103 	}
2104 	if (IS_MPC(priv))
2105 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2106 }
2107 
2108 static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2109 {
2110 	int restart_timer;
2111 	struct net_device *dev = arg;
2112 	struct ctcm_priv *priv = dev->ml_priv;
2113 
2114 	CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2115 
	if (IS_MPC(priv))
		restart_timer = CTCM_TIME_1_SEC;
	else
		restart_timer = CTCM_TIME_5_SEC;
2121 	dev_info(&dev->dev, "Restarting device\n");
2122 
2123 	dev_action_stop(fi, event, arg);
2124 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2125 	if (IS_MPC(priv))
2126 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2127 
	/* Going back into the start sequence too quickly can result in
	 * the other side becoming unreachable, due to the sense data
	 * reported when I/O is aborted.
	 */
2131 	fsm_addtimer(&priv->restart_timer, restart_timer,
2132 			DEV_EVENT_START, dev);
2133 }
2134 
/**
 * Called from the channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
2143 static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2144 {
2145 	struct net_device *dev = arg;
2146 	struct ctcm_priv *priv = dev->ml_priv;
2147 	int dev_stat = fsm_getstate(fi);
2148 
2149 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
2150 			"%s(%s): priv = %p [%d,%d]\n ",	CTCM_FUNTAIL,
2151 				dev->name, dev->ml_priv, dev_stat, event);
2152 
2153 	switch (fsm_getstate(fi)) {
2154 	case DEV_STATE_STARTWAIT_RXTX:
2155 		if (event == DEV_EVENT_RXUP)
2156 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2157 		else
2158 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2159 		break;
2160 	case DEV_STATE_STARTWAIT_RX:
2161 		if (event == DEV_EVENT_RXUP) {
2162 			fsm_newstate(fi, DEV_STATE_RUNNING);
2163 			dev_info(&dev->dev,
2164 				"Connected with remote side\n");
2165 			ctcm_clear_busy(dev);
2166 		}
2167 		break;
2168 	case DEV_STATE_STARTWAIT_TX:
2169 		if (event == DEV_EVENT_TXUP) {
2170 			fsm_newstate(fi, DEV_STATE_RUNNING);
2171 			dev_info(&dev->dev,
2172 				"Connected with remote side\n");
2173 			ctcm_clear_busy(dev);
2174 		}
2175 		break;
2176 	case DEV_STATE_STOPWAIT_TX:
2177 		if (event == DEV_EVENT_RXUP)
2178 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2179 		break;
2180 	case DEV_STATE_STOPWAIT_RX:
2181 		if (event == DEV_EVENT_TXUP)
2182 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2183 		break;
2184 	}
2185 
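	/* For MPC interfaces the channel that just came up must also be
	 * added to the MPC group.
	 */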
2186 	if (IS_MPC(priv)) {
2187 		if (event == DEV_EVENT_RXUP)
2188 			mpc_channel_action(priv->channel[CTCM_READ],
2189 				CTCM_READ, MPC_CHANNEL_ADD);
2190 		else
2191 			mpc_channel_action(priv->channel[CTCM_WRITE],
2192 				CTCM_WRITE, MPC_CHANNEL_ADD);
2193 	}
2194 }
2195 
/**
 * Called from the channel statemachine
 * when a channel has been shut down.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
2204 static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2205 {
2207 	struct net_device *dev = arg;
2208 	struct ctcm_priv *priv = dev->ml_priv;
2209 
2210 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2211 
2212 	switch (fsm_getstate(fi)) {
2213 	case DEV_STATE_RUNNING:
2214 		if (event == DEV_EVENT_TXDOWN)
2215 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2216 		else
2217 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2218 		break;
2219 	case DEV_STATE_STARTWAIT_RX:
2220 		if (event == DEV_EVENT_TXDOWN)
2221 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2222 		break;
2223 	case DEV_STATE_STARTWAIT_TX:
2224 		if (event == DEV_EVENT_RXDOWN)
2225 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2226 		break;
2227 	case DEV_STATE_STOPWAIT_RXTX:
2228 		if (event == DEV_EVENT_TXDOWN)
2229 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2230 		else
2231 			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2232 		break;
2233 	case DEV_STATE_STOPWAIT_RX:
2234 		if (event == DEV_EVENT_RXDOWN)
2235 			fsm_newstate(fi, DEV_STATE_STOPPED);
2236 		break;
2237 	case DEV_STATE_STOPWAIT_TX:
2238 		if (event == DEV_EVENT_TXDOWN)
2239 			fsm_newstate(fi, DEV_STATE_STOPPED);
2240 		break;
2241 	}
2242 	if (IS_MPC(priv)) {
2243 		if (event == DEV_EVENT_RXDOWN)
2244 			mpc_channel_action(priv->channel[CTCM_READ],
2245 				CTCM_READ, MPC_CHANNEL_REMOVE);
2246 		else
2247 			mpc_channel_action(priv->channel[CTCM_WRITE],
2248 				CTCM_WRITE, MPC_CHANNEL_REMOVE);
2249 	}
2250 }
2251 
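/*
 * The statemachine for the network interface as a whole: it tracks which
 * of the read/write channels are up and moves the device through the
 * StartWait/StopWait/Running states accordingly.
 */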
2252 const fsm_node dev_fsm[] = {
2253 	{ DEV_STATE_STOPPED,        DEV_EVENT_START,   dev_action_start   },
2254 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
2255 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
2256 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
2257 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
2258 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
2259 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
2260 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
2261 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
2262 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
2263 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
2264 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
2265 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
2266 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
2267 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
2268 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
2269 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
2270 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
2271 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
2272 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
2273 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2274 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
2275 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
2276 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
2277 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
2278 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
2279 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
2280 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
2281 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
2282 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
2283 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
2284 	{ DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
2285 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
2286 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
2287 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    ctcm_action_nop    },
2288 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    ctcm_action_nop    },
2289 	{ DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
2290 };
2291 
2292 int dev_fsm_len = ARRAY_SIZE(dev_fsm);
2293 
2294 /* --- This is the END my friend --- */
2295 
2296