/*
 * Copyright IBM Corp. 2001, 2009
 * Author(s):
 *      Original CTC driver(s):
 *              Fritz Elfert (felfert@millenux.com)
 *              Dieter Wellerdiek (wel@de.ibm.com)
 *              Martin Schwidefsky (schwidefsky@de.ibm.com)
 *              Denis Joseph Barrow (barrow_dj@yahoo.com)
 *              Jochen Roehrig (roehrig@de.ibm.com)
 *              Cornelia Huck <cornelia.huck@de.ibm.com>
 *      MPC additions:
 *              Belinda Thompson (belindat@us.ibm.com)
 *              Andy Richter (richtera@us.ibm.com)
 *      Revived by:
 *              Peter Tiedemann (ptiedem@de.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "ctcm_fsms.h"
#include "ctcm_main.h"

/* Some common global variables */

/**
 * The root device for ctcm group devices
 */
static struct device *ctcm_root_dev;

/*
 * Linked list of all detected channels.
 */
struct channel *channels;

/**
 * Unpack a just received skb and hand it over to
 * upper layers.
 *
 *  ch          The channel where this skb has been received.
 *  pskb        The received skb.
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
        struct net_device *dev = ch->netdev;
        struct ctcm_priv *priv = dev->ml_priv;
        __u16 len = *((__u16 *) pskb->data);

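        /*
         * A received block starts with a 2-byte block length, followed
         * by one or more LL headers, each immediately preceding its
         * packet data. Strip the block length here and walk the LL
         * headers in the loop below.
         */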
        skb_put(pskb, 2 + LL_HEADER_LENGTH);
        skb_pull(pskb, 2);
        pskb->dev = dev;
        pskb->ip_summed = CHECKSUM_UNNECESSARY;
        while (len > 0) {
                struct sk_buff *skb;
                int skblen;
                struct ll_header *header = (struct ll_header *)pskb->data;

                skb_pull(pskb, LL_HEADER_LENGTH);
                if ((ch->protocol == CTCM_PROTO_S390) &&
                    (header->type != ETH_P_IP)) {
                        if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
                                ch->logflags |= LOG_FLAG_ILLEGALPKT;
                                /*
                                 * Check packet type only if we stick strictly
                                 * to S/390's protocol of OS390. This only
                                 * supports IP. Otherwise allow any packet
                                 * type.
                                 */
                                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                        "%s(%s): Illegal packet type 0x%04x"
                                        " - dropping",
                                        CTCM_FUNTAIL, dev->name, header->type);
                        }
                        priv->stats.rx_dropped++;
                        priv->stats.rx_frame_errors++;
                        return;
                }
                pskb->protocol = ntohs(header->type);
                if ((header->length <= LL_HEADER_LENGTH) ||
                    (len <= LL_HEADER_LENGTH)) {
                        if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
                                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                        "%s(%s): Illegal packet size %d(%d,%d)"
                                        "- dropping",
                                        CTCM_FUNTAIL, dev->name,
                                        header->length, dev->mtu, len);
                                ch->logflags |= LOG_FLAG_ILLEGALSIZE;
                        }

                        priv->stats.rx_dropped++;
                        priv->stats.rx_length_errors++;
                        return;
                }
                header->length -= LL_HEADER_LENGTH;
                len -= LL_HEADER_LENGTH;
                if ((header->length > skb_tailroom(pskb)) ||
                        (header->length > len)) {
                        if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
                                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                        "%s(%s): Packet size %d (overrun)"
                                        " - dropping", CTCM_FUNTAIL,
                                                dev->name, header->length);
                                ch->logflags |= LOG_FLAG_OVERRUN;
                        }

                        priv->stats.rx_dropped++;
                        priv->stats.rx_length_errors++;
                        return;
                }
                skb_put(pskb, header->length);
                skb_reset_mac_header(pskb);
                len -= header->length;
                skb = dev_alloc_skb(pskb->len);
                if (!skb) {
                        if (!(ch->logflags & LOG_FLAG_NOMEM)) {
                                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                        "%s(%s): MEMORY allocation error",
                                                CTCM_FUNTAIL, dev->name);
                                ch->logflags |= LOG_FLAG_NOMEM;
                        }
                        priv->stats.rx_dropped++;
                        return;
                }
                skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
                                          pskb->len);
                skb_reset_mac_header(skb);
                skb->dev = pskb->dev;
                skb->protocol = pskb->protocol;
                pskb->ip_summed = CHECKSUM_UNNECESSARY;
                skblen = skb->len;
                /*
                 * reset logflags
                 */
                ch->logflags = 0;
                priv->stats.rx_packets++;
                priv->stats.rx_bytes += skblen;
                netif_rx_ni(skb);
                if (len > 0) {
                        skb_pull(pskb, header->length);
                        if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
                                CTCM_DBF_DEV_NAME(TRACE, dev,
                                        "Overrun in ctcm_unpack_skb");
                                ch->logflags |= LOG_FLAG_OVERRUN;
                                return;
                        }
                        skb_put(pskb, LL_HEADER_LENGTH);
                }
        }
}

/**
 * Release a specific channel in the channel list.
 *
 *  ch          Pointer to channel struct to be released.
 */
static void channel_free(struct channel *ch)
{
        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
        ch->flags &= ~CHANNEL_FLAGS_INUSE;
        fsm_newstate(ch->fsm, CTC_STATE_IDLE);
}

/**
 * Remove a specific channel in the channel list.
 *
 *  ch          Pointer to channel struct to be released.
 */
static void channel_remove(struct channel *ch)
{
        struct channel **c = &channels;
        char chid[CTCM_ID_SIZE+1];
        int ok = 0;

        if (ch == NULL)
                return;
        else
                strncpy(chid, ch->id, CTCM_ID_SIZE);

        channel_free(ch);
        while (*c) {
                if (*c == ch) {
                        *c = ch->next;
                        fsm_deltimer(&ch->timer);
                        if (IS_MPC(ch))
                                fsm_deltimer(&ch->sweep_timer);

                        kfree_fsm(ch->fsm);
                        clear_normalized_cda(&ch->ccw[4]);
                        if (ch->trans_skb != NULL) {
                                clear_normalized_cda(&ch->ccw[1]);
                                dev_kfree_skb_any(ch->trans_skb);
                        }
                        if (IS_MPC(ch)) {
                                tasklet_kill(&ch->ch_tasklet);
                                tasklet_kill(&ch->ch_disc_tasklet);
                                kfree(ch->discontact_th);
                        }
                        kfree(ch->ccw);
                        kfree(ch->irb);
                        kfree(ch);
                        ok = 1;
                        break;
                }
                c = &((*c)->next);
        }

        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
                        chid, ok ? "OK" : "failed");
}

/**
 * Get a specific channel from the channel list.
 *
 *  type        Type of channel we are interested in.
 *  id          Id of channel we are interested in.
 *  direction   Direction we want to use this channel for.
 *
 * returns Pointer to a channel or NULL if no matching channel available.
 */
static struct channel *channel_get(enum ctcm_channel_types type,
                                        char *id, int direction)
{
        struct channel *ch = channels;

        while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
                ch = ch->next;
        if (!ch) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                "%s(%d, %s, %d) not found in channel list\n",
                                CTCM_FUNTAIL, type, id, direction);
        } else {
                if (ch->flags & CHANNEL_FLAGS_INUSE)
                        ch = NULL;
                else {
                        ch->flags |= CHANNEL_FLAGS_INUSE;
                        ch->flags &= ~CHANNEL_FLAGS_RWMASK;
                        ch->flags |= (direction == CTCM_WRITE)
                            ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
                        fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
                }
        }
        return ch;
}

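/**
 * Check an irb pointer returned by the common I/O layer for errors
 * and issue a corresponding device message.
 *
 *  cdev        The ccw_device the irb belongs to.
 *  irb         The irb (or ERR_PTR value) to be checked.
 *
 * returns 0 if the irb is valid, the negative error code otherwise.
 */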
static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
{
        if (!IS_ERR(irb))
                return 0;

        CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
                        "irb error %ld on device %s\n",
                                PTR_ERR(irb), dev_name(&cdev->dev));

        switch (PTR_ERR(irb)) {
        case -EIO:
                dev_err(&cdev->dev,
                        "An I/O-error occurred on the CTCM device\n");
                break;
        case -ETIMEDOUT:
                dev_err(&cdev->dev,
                        "An adapter hardware operation timed out\n");
                break;
        default:
                dev_err(&cdev->dev,
                        "An error occurred on the adapter hardware\n");
        }
        return PTR_ERR(irb);
}


/**
 * Check sense of a unit check.
 *
 *  ch          The channel, the sense code belongs to.
 *  sense       The sense code to inspect.
 */
static inline void ccw_unit_check(struct channel *ch, __u8 sense)
{
        CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
                        "%s(%s): %02x",
                                CTCM_FUNTAIL, ch->id, sense);

        if (sense & SNS0_INTERVENTION_REQ) {
                if (sense & 0x01) {
                        if (ch->sense_rc != 0x01) {
                                pr_notice(
                                        "%s: The communication peer has "
                                        "disconnected\n", ch->id);
                                ch->sense_rc = 0x01;
                        }
                        fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
                } else {
                        if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
                                pr_notice(
                                        "%s: The remote operating system is "
                                        "not available\n", ch->id);
                                ch->sense_rc = SNS0_INTERVENTION_REQ;
                        }
                        fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
                }
        } else if (sense & SNS0_EQUIPMENT_CHECK) {
                if (sense & SNS0_BUS_OUT_CHECK) {
                        if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
                                CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                                        "%s(%s): remote HW error %02x",
                                                CTCM_FUNTAIL, ch->id, sense);
                                ch->sense_rc = SNS0_BUS_OUT_CHECK;
                        }
                        fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
                } else {
                        if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
                                CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                                        "%s(%s): remote read parity error %02x",
                                                CTCM_FUNTAIL, ch->id, sense);
                                ch->sense_rc = SNS0_EQUIPMENT_CHECK;
                        }
                        fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
                }
        } else if (sense & SNS0_BUS_OUT_CHECK) {
                if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
                        CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                                "%s(%s): BUS OUT error %02x",
                                        CTCM_FUNTAIL, ch->id, sense);
                        ch->sense_rc = SNS0_BUS_OUT_CHECK;
                }
                if (sense & 0x04)       /* data-streaming timeout */
                        fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
                else                    /* Data-transfer parity error */
                        fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
        } else if (sense & SNS0_CMD_REJECT) {
                if (ch->sense_rc != SNS0_CMD_REJECT) {
                        CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                                "%s(%s): Command rejected",
                                                CTCM_FUNTAIL, ch->id);
                        ch->sense_rc = SNS0_CMD_REJECT;
                }
        } else if (sense == 0) {
                CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                        "%s(%s): Unit check ZERO",
                                        CTCM_FUNTAIL, ch->id);
                fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
        } else {
                CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
                        "%s(%s): Unit check code %02x unknown",
                                        CTCM_FUNTAIL, ch->id, sense);
                fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
        }
}

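/**
 * Allocate the transfer buffer (trans_skb) of a channel and map it
 * into the channel's static read/write ccw (ccw[1]).
 *
 *  ch          The channel to allocate the buffer for.
 *
 * returns 0 on success, -ENOMEM on allocation failure.
 */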
int ctcm_ch_alloc_buffer(struct channel *ch)
{
        clear_normalized_cda(&ch->ccw[1]);
        ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
        if (ch->trans_skb == NULL) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s trans_skb allocation error",
                        CTCM_FUNTAIL, ch->id,
                        (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
                                "RX" : "TX");
                return -ENOMEM;
        }

        ch->ccw[1].count = ch->max_bufsize;
        if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
                dev_kfree_skb(ch->trans_skb);
                ch->trans_skb = NULL;
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): %s set norm_cda failed",
                        CTCM_FUNTAIL, ch->id,
                        (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
                                "RX" : "TX");
                return -ENOMEM;
        }

        ch->ccw[1].count = 0;
        ch->trans_skb_data = ch->trans_skb->data;
        ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
        return 0;
}

/*
 * Interface API for upper network layers
 */

/**
 * Open an interface.
 * Called from generic network layer when ifconfig up is run.
 *
 *  dev         Pointer to interface struct.
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
int ctcm_open(struct net_device *dev)
{
        struct ctcm_priv *priv = dev->ml_priv;

        CTCMY_DBF_DEV_NAME(SETUP, dev, "");
        if (!IS_MPC(priv))
                fsm_event(priv->fsm, DEV_EVENT_START, dev);
        return 0;
}

/**
 * Close an interface.
 * Called from generic network layer when ifconfig down is run.
 *
 *  dev         Pointer to interface struct.
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
int ctcm_close(struct net_device *dev)
{
        struct ctcm_priv *priv = dev->ml_priv;

        CTCMY_DBF_DEV_NAME(SETUP, dev, "");
        if (!IS_MPC(priv))
                fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
        return 0;
}


/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch          Channel to be used for sending.
 *  skb         Pointer to struct sk_buff of packet to send.
 *              The linklevel header has already been set up
 *              by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
        unsigned long saveflags;
        struct ll_header header;
        int rc = 0;
        __u16 block_len;
        int ccw_idx;
        struct sk_buff *nskb;
        unsigned long hi;

        /* we need to acquire the lock for testing the state
         * otherwise we can have an IRQ changing the state to
         * TXIDLE after the test but before acquiring the lock.
         */
        spin_lock_irqsave(&ch->collect_lock, saveflags);
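        /*
         * If the channel is not idle for TX, queue the frame on the
         * collect queue instead; it is sent as part of a larger block
         * once the current transfer has completed.
         */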
        if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
                int l = skb->len + LL_HEADER_LENGTH;

                if (ch->collect_len + l > ch->max_bufsize - 2) {
                        spin_unlock_irqrestore(&ch->collect_lock, saveflags);
                        return -EBUSY;
                } else {
                        atomic_inc(&skb->users);
                        header.length = l;
                        header.type = skb->protocol;
                        header.unused = 0;
                        memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
                               LL_HEADER_LENGTH);
                        skb_queue_tail(&ch->collect_queue, skb);
                        ch->collect_len += l;
                }
                spin_unlock_irqrestore(&ch->collect_lock, saveflags);
                goto done;
        }
        spin_unlock_irqrestore(&ch->collect_lock, saveflags);
        /*
         * Protect skb against being freed by upper
         * layers.
         */
        atomic_inc(&skb->users);
        ch->prof.txlen += skb->len;
        header.length = skb->len + LL_HEADER_LENGTH;
        header.type = skb->protocol;
        header.unused = 0;
        memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
        block_len = skb->len + 2;
        *((__u16 *)skb_push(skb, 2)) = block_len;

        /*
         * IDAL support in CTCM is broken, so we have to
         * care about skb's above 2G ourselves.
         */
        hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
        if (hi) {
                nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!nskb) {
                        atomic_dec(&skb->users);
                        skb_pull(skb, LL_HEADER_LENGTH + 2);
                        ctcm_clear_busy(ch->netdev);
                        return -ENOMEM;
                } else {
                        memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
                        atomic_inc(&nskb->users);
                        atomic_dec(&skb->users);
                        dev_kfree_skb_irq(skb);
                        skb = nskb;
                }
        }

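        /*
         * Preferred path: write the skb directly using ccw[3..5] with
         * an idal for skb->data (ccw_idx = 3). If idal setup fails,
         * fall back to copying into the pre-mapped trans_skb and use
         * ccw[0..2] (ccw_idx = 0).
         */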
        ch->ccw[4].count = block_len;
        if (set_normalized_cda(&ch->ccw[4], skb->data)) {
                /*
                 * idal allocation failed, try via copying to
                 * trans_skb. trans_skb usually has a pre-allocated
                 * idal.
                 */
                if (ctcm_checkalloc_buffer(ch)) {
                        /*
                         * Remove our header. It gets added
                         * again on retransmit.
                         */
                        atomic_dec(&skb->users);
                        skb_pull(skb, LL_HEADER_LENGTH + 2);
                        ctcm_clear_busy(ch->netdev);
                        return -ENOMEM;
                }

                skb_reset_tail_pointer(ch->trans_skb);
                ch->trans_skb->len = 0;
                ch->ccw[1].count = skb->len;
                skb_copy_from_linear_data(skb,
                                skb_put(ch->trans_skb, skb->len), skb->len);
                atomic_dec(&skb->users);
                dev_kfree_skb_irq(skb);
                ccw_idx = 0;
        } else {
                skb_queue_tail(&ch->io_queue, skb);
                ccw_idx = 3;
        }
        if (do_debug_ccw)
                ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
                                        sizeof(struct ccw1) * 3);
        ch->retry = 0;
        fsm_newstate(ch->fsm, CTC_STATE_TX);
        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
        spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
        ch->prof.send_stamp = jiffies;
        rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
                                        (unsigned long)ch, 0xff, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
        if (ccw_idx == 3)
                ch->prof.doios_single++;
        if (rc != 0) {
                fsm_deltimer(&ch->timer);
                ctcm_ccw_check_rc(ch, rc, "single skb TX");
                if (ccw_idx == 3)
                        skb_dequeue_tail(&ch->io_queue);
                /*
                 * Remove our header. It gets added
                 * again on retransmit.
                 */
                skb_pull(skb, LL_HEADER_LENGTH + 2);
        } else if (ccw_idx == 0) {
                struct net_device *dev = ch->netdev;
                struct ctcm_priv *priv = dev->ml_priv;
                priv->stats.tx_packets++;
                priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
        }
done:
        ctcm_clear_busy(ch->netdev);
        return rc;
}

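/**
 * Send a TH sweep request on the write channel of an MPC group.
 * Called from ctcmpc_transmit_skb() once the TH sequence number gets
 * large, so that both sides of the group can resynchronize their
 * sequence numbers.
 *
 *  rch         The channel that triggered the sweep.
 */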
static void ctcmpc_send_sweep_req(struct channel *rch)
{
        struct net_device *dev = rch->netdev;
        struct ctcm_priv *priv;
        struct mpc_group *grp;
        struct th_sweep *header;
        struct sk_buff *sweep_skb;
        struct channel *ch;
        /* int rc = 0; */

        priv = dev->ml_priv;
        grp = priv->mpcg;
        ch = priv->channel[CTCM_WRITE];

        /* sweep processing is not complete until response and request */
        /* have completed for all read channels in group               */
        if (grp->in_sweep == 0) {
                grp->in_sweep = 1;
                grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
                grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
        }

        sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);

        if (sweep_skb == NULL) {
                /* rc = -ENOMEM; */
                goto nomem;
        }

        header = kmalloc(TH_SWEEP_LENGTH, gfp_type());

        if (!header) {
                dev_kfree_skb_any(sweep_skb);
                /* rc = -ENOMEM; */
                goto nomem;
        }

        header->th.th_seg       = 0x00;
        header->th.th_ch_flag   = TH_SWEEP_REQ;  /* 0x0f */
        header->th.th_blk_flag  = 0x00;
        header->th.th_is_xid    = 0x00;
        header->th.th_seq_num   = 0x00;
        header->sw.th_last_seq  = ch->th_seq_num;

        memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);

        kfree(header);

        dev->trans_start = jiffies;
        skb_queue_tail(&ch->sweep_queue, sweep_skb);

        fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);

        return;

nomem:
        grp->in_sweep = 0;
        ctcm_clear_busy(dev);
        fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);

        return;
}

/*
 * MPC mode version of transmit_skb
 */
static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
        struct pdu *p_header;
        struct net_device *dev = ch->netdev;
        struct ctcm_priv *priv = dev->ml_priv;
        struct mpc_group *grp = priv->mpcg;
        struct th_header *header;
        struct sk_buff *nskb;
        int rc = 0;
        int ccw_idx;
        unsigned long hi;
        unsigned long saveflags = 0;    /* avoids compiler warning */

        CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
                        __func__, dev->name, smp_processor_id(), ch,
                                        ch->id, fsm_getstate_str(ch->fsm));

        if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
                spin_lock_irqsave(&ch->collect_lock, saveflags);
                atomic_inc(&skb->users);
                p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

                if (!p_header) {
                        spin_unlock_irqrestore(&ch->collect_lock, saveflags);
                        goto nomem_exit;
                }

                p_header->pdu_offset = skb->len;
                p_header->pdu_proto = 0x01;
                p_header->pdu_flag = 0x00;
                if (skb->protocol == ntohs(ETH_P_SNAP)) {
                        p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
                } else {
                        p_header->pdu_flag |= PDU_FIRST;
                }
                p_header->pdu_seq = 0;
                memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
                       PDU_HEADER_LENGTH);

                CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
                                "pdu header and data for up to 32 bytes:\n",
                                __func__, dev->name, skb->len);
                CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

                skb_queue_tail(&ch->collect_queue, skb);
                ch->collect_len += skb->len;
                kfree(p_header);

                spin_unlock_irqrestore(&ch->collect_lock, saveflags);
                goto done;
        }

        /*
         * Protect skb against being freed by upper
         * layers.
         */
        atomic_inc(&skb->users);

        /*
         * IDAL support in CTCM is broken, so we have to
         * care about skb's above 2G ourselves.
         */
        hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
        if (hi) {
                nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!nskb) {
                        goto nomem_exit;
                } else {
                        memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
                        atomic_inc(&nskb->users);
                        atomic_dec(&skb->users);
                        dev_kfree_skb_irq(skb);
                        skb = nskb;
                }
        }

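        /*
         * Build the PDU header in front of the data here; the TH header
         * with the next sequence number is added further down, once the
         * skb that actually goes out on the channel is known.
         */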
        p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());

        if (!p_header)
                goto nomem_exit;

        p_header->pdu_offset = skb->len;
        p_header->pdu_proto = 0x01;
        p_header->pdu_flag = 0x00;
        p_header->pdu_seq = 0;
        if (skb->protocol == ntohs(ETH_P_SNAP)) {
                p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
        } else {
                p_header->pdu_flag |= PDU_FIRST;
        }
        memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);

        kfree(p_header);

        if (ch->collect_len > 0) {
                spin_lock_irqsave(&ch->collect_lock, saveflags);
                skb_queue_tail(&ch->collect_queue, skb);
                ch->collect_len += skb->len;
                skb = skb_dequeue(&ch->collect_queue);
                ch->collect_len -= skb->len;
                spin_unlock_irqrestore(&ch->collect_lock, saveflags);
        }

        p_header = (struct pdu *)skb->data;
        p_header->pdu_flag |= PDU_LAST;

        ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;

        header = kmalloc(TH_HEADER_LENGTH, gfp_type());
        if (!header)
                goto nomem_exit;

        header->th_seg = 0x00;
        header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
        header->th_blk_flag = 0x00;
        header->th_is_xid = 0x00;          /* Just data here */
        ch->th_seq_num++;
        header->th_seq_num = ch->th_seq_num;

        CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n",
                       __func__, dev->name, ch->th_seq_num);

        /* put the TH on the packet */
        memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);

        kfree(header);

        CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
                        "up to 32 bytes sent to vtam:\n",
                                __func__, dev->name, skb->len);
        CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

        ch->ccw[4].count = skb->len;
        if (set_normalized_cda(&ch->ccw[4], skb->data)) {
                /*
                 * idal allocation failed, try via copying to trans_skb.
                 * trans_skb usually has a pre-allocated idal.
                 */
                if (ctcm_checkalloc_buffer(ch)) {
                        /*
                         * Remove our header.
                         * It gets added again on retransmit.
                         */
                        goto nomem_exit;
                }

                skb_reset_tail_pointer(ch->trans_skb);
                ch->trans_skb->len = 0;
                ch->ccw[1].count = skb->len;
                memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
                atomic_dec(&skb->users);
                dev_kfree_skb_irq(skb);
                ccw_idx = 0;
                CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
                                "up to 32 bytes sent to vtam:\n",
                                __func__, dev->name, ch->trans_skb->len);
                CTCM_D3_DUMP((char *)ch->trans_skb->data,
                                min_t(int, 32, ch->trans_skb->len));
        } else {
                skb_queue_tail(&ch->io_queue, skb);
                ccw_idx = 3;
        }
        ch->retry = 0;
        fsm_newstate(ch->fsm, CTC_STATE_TX);
        fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

        if (do_debug_ccw)
                ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
                                        sizeof(struct ccw1) * 3);

        spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
        ch->prof.send_stamp = jiffies;
        rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
                                        (unsigned long)ch, 0xff, 0);
        spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
        if (ccw_idx == 3)
                ch->prof.doios_single++;
        if (rc != 0) {
                fsm_deltimer(&ch->timer);
                ctcm_ccw_check_rc(ch, rc, "single skb TX");
                if (ccw_idx == 3)
                        skb_dequeue_tail(&ch->io_queue);
        } else if (ccw_idx == 0) {
                priv->stats.tx_packets++;
                priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
        }
        if (ch->th_seq_num > 0xf0000000)        /* Chosen at random. */
                ctcmpc_send_sweep_req(ch);

        goto done;
nomem_exit:
        CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
                        "%s(%s): MEMORY allocation ERROR\n",
                        CTCM_FUNTAIL, ch->id);
        rc = -ENOMEM;
        atomic_dec(&skb->users);
        dev_kfree_skb_any(skb);
        fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
        CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
        return rc;
}

/**
 * Start transmission of a packet.
 * Called from generic network device layer.
 *
 *  skb         Pointer to buffer containing the packet.
 *  dev         Pointer to interface struct.
 *
 * returns 0 if packet consumed, !0 if packet rejected.
 *         Note: If we return !0, then the packet is free'd by
 *               the generic network layer.
 */
/* first merge version - leaving both functions separated */
static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct ctcm_priv *priv = dev->ml_priv;

        if (skb == NULL) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                                "%s(%s): NULL sk_buff passed",
                                        CTCM_FUNTAIL, dev->name);
                priv->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
        if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
                        "%s(%s): Got sk_buff with head room < %ld bytes",
                        CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
                dev_kfree_skb(skb);
                priv->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        /*
         * If channels are not running, try to restart them
         * and throw away packet.
         */
        if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
                fsm_event(priv->fsm, DEV_EVENT_START, dev);
                dev_kfree_skb(skb);
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
                priv->stats.tx_carrier_errors++;
                return NETDEV_TX_OK;
        }

        if (ctcm_test_and_set_busy(dev))
                return NETDEV_TX_BUSY;

        dev->trans_start = jiffies;
        if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
                return NETDEV_TX_BUSY;
        return NETDEV_TX_OK;
}

/* unmerged MPC variant of ctcm_tx */
static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
{
        int len = 0;
        struct ctcm_priv *priv = dev->ml_priv;
        struct mpc_group *grp  = priv->mpcg;
        struct sk_buff *newskb = NULL;

        /*
         * Some sanity checks ...
         */
        if (skb == NULL) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): NULL sk_buff passed",
                                        CTCM_FUNTAIL, dev->name);
                priv->stats.tx_dropped++;
                goto done;
        }
        if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
                CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
                        "%s(%s): Got sk_buff with head room < %ld bytes",
                        CTCM_FUNTAIL, dev->name,
                                TH_HEADER_LENGTH + PDU_HEADER_LENGTH);

                CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));

                len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
                newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);

                if (!newskb) {
                        CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
                                "%s: %s: __dev_alloc_skb failed",
                                                __func__, dev->name);

                        dev_kfree_skb_any(skb);
                        priv->stats.tx_dropped++;
                        priv->stats.tx_errors++;
                        priv->stats.tx_carrier_errors++;
                        fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
                        goto done;
                }
                newskb->protocol = skb->protocol;
                skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
                memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
                dev_kfree_skb_any(skb);
                skb = newskb;
        }

        /*
         * If channels are not running,
         * notify anybody about a link failure and throw
         * away packet.
         */
        if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
           (fsm_getstate(grp->fsm) <  MPCG_STATE_XID2INITW)) {
                dev_kfree_skb_any(skb);
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): inactive MPCGROUP - dropped",
                                        CTCM_FUNTAIL, dev->name);
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
                priv->stats.tx_carrier_errors++;
                goto done;
        }

        if (ctcm_test_and_set_busy(dev)) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): device busy - dropped",
                                        CTCM_FUNTAIL, dev->name);
                dev_kfree_skb_any(skb);
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
                priv->stats.tx_carrier_errors++;
                fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
                goto done;
        }

        dev->trans_start = jiffies;
        if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
                CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
                        "%s(%s): device error - dropped",
                                        CTCM_FUNTAIL, dev->name);
                dev_kfree_skb_any(skb);
                priv->stats.tx_dropped++;
                priv->stats.tx_errors++;
                priv->stats.tx_carrier_errors++;
                ctcm_clear_busy(dev);
                fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
                goto done;
        }
        ctcm_clear_busy(dev);
done:
        if (do_debug)
                MPC_DBF_DEV_NAME(TRACE, dev, "exit");

        return NETDEV_TX_OK;    /* handle freeing of skb here */
}


/**
 * Sets MTU of an interface.
 *
 *  dev         Pointer to interface struct.
 *  new_mtu     The new MTU to use for this interface.
 *
 * returns 0 on success, -EINVAL if MTU is out of valid range.
 *         (valid range is 576 .. 65527). If VM is on the
 *         remote side, maximum MTU is 32760, however this is
 *         not checked here.
 */
static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ctcm_priv *priv;
        int max_bufsize;

        if (new_mtu < 576 || new_mtu > 65527)
                return -EINVAL;

        priv = dev->ml_priv;
        max_bufsize = priv->channel[CTCM_READ]->max_bufsize;

        if (IS_MPC(priv)) {
                if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
                        return -EINVAL;
                dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
        } else {
                if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
                        return -EINVAL;
                dev->hard_header_len = LL_HEADER_LENGTH + 2;
        }
        dev->mtu = new_mtu;
        return 0;
}

/**
 * Returns interface statistics of a device.
 *
 *  dev         Pointer to interface struct.
 *
 * returns Pointer to stats struct of this interface.
 */
static struct net_device_stats *ctcm_stats(struct net_device *dev)
{
        return &((struct ctcm_priv *)dev->ml_priv)->stats;
}

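/**
 * Free all resources attached to a ctcm net_device (MPC group,
 * state machines, XID buffers) before the device itself goes away.
 *
 *  dev         Pointer to interface struct.
 */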
static void ctcm_free_netdevice(struct net_device *dev)
{
        struct ctcm_priv *priv;
        struct mpc_group *grp;

        CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
                        "%s(%s)", CTCM_FUNTAIL, dev->name);
        priv = dev->ml_priv;
        if (priv) {
                grp = priv->mpcg;
                if (grp) {
                        if (grp->fsm)
                                kfree_fsm(grp->fsm);
                        if (grp->xid_skb)
                                dev_kfree_skb(grp->xid_skb);
                        if (grp->rcvd_xid_skb)
                                dev_kfree_skb(grp->rcvd_xid_skb);
                        tasklet_kill(&grp->mpc_tasklet2);
                        kfree(grp);
                        priv->mpcg = NULL;
                }
                if (priv->fsm) {
                        kfree_fsm(priv->fsm);
                        priv->fsm = NULL;
                }
                kfree(priv->xid);
                priv->xid = NULL;
        /*
         * Note: kfree(priv); is done in "opposite" function of
         * allocator function probe_device which is remove_device.
         */
        }
#ifdef MODULE
        free_netdev(dev);
#endif
}

struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);

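/*
 * Two sets of device operations: plain CTC/ESCON interfaces transmit
 * via ctcm_tx(), MPC interfaces via ctcmpc_tx(); all other callbacks
 * are shared.
 */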
static const struct net_device_ops ctcm_netdev_ops = {
        .ndo_open               = ctcm_open,
        .ndo_stop               = ctcm_close,
        .ndo_get_stats          = ctcm_stats,
        .ndo_change_mtu         = ctcm_change_mtu,
        .ndo_start_xmit         = ctcm_tx,
};

static const struct net_device_ops ctcm_mpc_netdev_ops = {
        .ndo_open               = ctcm_open,
        .ndo_stop               = ctcm_close,
        .ndo_get_stats          = ctcm_stats,
        .ndo_change_mtu         = ctcm_change_mtu,
        .ndo_start_xmit         = ctcmpc_tx,
};

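/**
 * Generic setup of a ctcm net_device, used as the alloc_netdev()
 * setup callback: point-to-point link, no ARP, SLIP-type device.
 *
 *  dev         Pointer to interface struct.
 */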
1121 void static ctcm_dev_setup(struct net_device *dev)
1122 {
1123         dev->type = ARPHRD_SLIP;
1124         dev->tx_queue_len = 100;
1125         dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1126 }
1127
1128 /*
1129  * Initialize everything of the net device except the name and the
1130  * channel structs.
1131  */
1132 static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
1133 {
1134         struct net_device *dev;
1135         struct mpc_group *grp;
1136         if (!priv)
1137                 return NULL;
1138
1139         if (IS_MPC(priv))
1140                 dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN,
1141                                    ctcm_dev_setup);
1142         else
1143                 dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN,
1144                                    ctcm_dev_setup);
1145
1146         if (!dev) {
1147                 CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
1148                         "%s: MEMORY allocation ERROR",
1149                         CTCM_FUNTAIL);
1150                 return NULL;
1151         }
1152         dev->ml_priv = priv;
1153         priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
1154                                 CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
1155                                 dev_fsm, dev_fsm_len, GFP_KERNEL);
1156         if (priv->fsm == NULL) {
1157                 CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
1158                 free_netdev(dev);
1159                 return NULL;
1160         }
1161         fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
1162         fsm_settimer(priv->fsm, &priv->restart_timer);
1163
1164         if (IS_MPC(priv)) {
1165                 /*  MPC Group Initializations  */
1166                 grp = ctcmpc_init_mpc_group(priv);
1167                 if (grp == NULL) {
1168                         MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
1169                         free_netdev(dev);
1170                         return NULL;
1171                 }
1172                 tasklet_init(&grp->mpc_tasklet2,
1173                                 mpc_group_ready, (unsigned long)dev);
1174                 dev->mtu = MPC_BUFSIZE_DEFAULT -
1175                                 TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
1176
1177                 dev->netdev_ops = &ctcm_mpc_netdev_ops;
1178                 dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
1179                 priv->buffer_size = MPC_BUFSIZE_DEFAULT;
1180         } else {
1181                 dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
1182                 dev->netdev_ops = &ctcm_netdev_ops;
1183                 dev->hard_header_len = LL_HEADER_LENGTH + 2;
1184         }
1185
1186         CTCMY_DBF_DEV(SETUP, dev, "finished");
1187
1188         return dev;
1189 }
1190
1191 /**
1192  * Main IRQ handler.
1193  *
1194  *  cdev        The ccw_device the interrupt is for.
1195  *  intparm     interruption parameter.
1196  *  irb         interruption response block.
1197  */
1198 static void ctcm_irq_handler(struct ccw_device *cdev,
1199                                 unsigned long intparm, struct irb *irb)
1200 {
1201         struct channel          *ch;
1202         struct net_device       *dev;
1203         struct ctcm_priv        *priv;
1204         struct ccwgroup_device  *cgdev;
1205         int cstat;
1206         int dstat;
1207
1208         CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1209                 "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
1210
1211         if (ctcm_check_irb_error(cdev, irb))
1212                 return;
1213
1214         cgdev = dev_get_drvdata(&cdev->dev);
1215
1216         cstat = irb->scsw.cmd.cstat;
1217         dstat = irb->scsw.cmd.dstat;
1218
1219         /* Check for unsolicited interrupts. */
1220         if (cgdev == NULL) {
1221                 CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
1222                         "%s(%s) unsolicited irq: c-%02x d-%02x\n",
1223                         CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
1224                 dev_warn(&cdev->dev,
1225                         "The adapter received a non-specific IRQ\n");
1226                 return;
1227         }
1228
1229         priv = dev_get_drvdata(&cgdev->dev);
1230
1231         /* Try to extract channel from driver data. */
1232         if (priv->channel[CTCM_READ]->cdev == cdev)
1233                 ch = priv->channel[CTCM_READ];
1234         else if (priv->channel[CTCM_WRITE]->cdev == cdev)
1235                 ch = priv->channel[CTCM_WRITE];
1236         else {
1237                 dev_err(&cdev->dev,
1238                         "%s: Internal error: Can't determine channel for "
1239                         "interrupt device %s\n",
1240                         __func__, dev_name(&cdev->dev));
1241                         /* Explain: inconsistent internal structures */
1242                 return;
1243         }
1244
1245         dev = ch->netdev;
1246         if (dev == NULL) {
1247                 dev_err(&cdev->dev,
1248                         "%s Internal error: net_device is NULL, ch = 0x%p\n",
1249                         __func__, ch);
1250                         /* Explain: inconsistent internal structures */
1251                 return;
1252         }
1253
1254         /* Copy interruption response block. */
1255         memcpy(ch->irb, irb, sizeof(struct irb));
1256
1257         /* Issue error message and return on subchannel error code */
1258         if (irb->scsw.cmd.cstat) {
1259                 fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
1260                 CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
1261                         "%s(%s): sub-ch check %s: cs=%02x ds=%02x",
1262                                 CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
1263                 dev_warn(&cdev->dev,
1264                                 "A check occurred on the subchannel\n");
1265                 return;
1266         }
1267
1268         /* Check the reason-code of a unit check */
1269         if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
1270                 if ((irb->ecw[0] & ch->sense_rc) == 0)
1271                         /* print it only once */
1272                         CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
1273                                 "%s(%s): sense=%02x, ds=%02x",
1274                                 CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
1275                 ccw_unit_check(ch, irb->ecw[0]);
1276                 return;
1277         }
1278         if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
1279                 if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
1280                         fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
1281                 else
1282                         fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
1283                 return;
1284         }
1285         if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
1286                 fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
1287                 return;
1288         }
1289         if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
1290             (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
1291             (irb->scsw.cmd.stctl ==
1292              (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
1293                 fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
1294         else
1295                 fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
1296
1297 }
1298
1299 static const struct device_type ctcm_devtype = {
1300         .name = "ctcm",
1301         .groups = ctcm_attr_groups,
1302 };
1303
1304 /**
1305  * Add ctcm specific attributes.
1306  * Add ctcm private data.
1307  *
1308  *  cgdev       pointer to ccwgroup_device just added
1309  *
1310  * returns 0 on success, !0 on failure.
1311  */
1312 static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1313 {
1314         struct ctcm_priv *priv;
1315
1316         CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1317                         "%s %p",
1318                         __func__, cgdev);
1319
1320         if (!get_device(&cgdev->dev))
1321                 return -ENODEV;
1322
1323         priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
1324         if (!priv) {
1325                 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1326                         "%s: memory allocation failure",
1327                         CTCM_FUNTAIL);
1328                 put_device(&cgdev->dev);
1329                 return -ENOMEM;
1330         }
1331         priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
1332         cgdev->cdev[0]->handler = ctcm_irq_handler;
1333         cgdev->cdev[1]->handler = ctcm_irq_handler;
1334         dev_set_drvdata(&cgdev->dev, priv);
1335         cgdev->dev.type = &ctcm_devtype;
1336
1337         return 0;
1338 }
1339
1340 /**
1341  * Add a new channel to the list of channels.
1342  * Keeps the channel list sorted.
1343  *
1344  *  cdev        The ccw_device to be added.
1345  *  type        The type class of the new channel.
1346  *  priv        Points to the private data of the ccwgroup_device.
1347  *
1348  * returns 0 on success, !0 on error.
1349  */
1350 static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1351                                 struct ctcm_priv *priv)
1352 {
1353         struct channel **c = &channels;
1354         struct channel *ch;
1355         int ccw_num;
1356         int rc = 0;
1357
1358         CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1359                 "%s(%s), type %d, proto %d",
1360                         __func__, dev_name(&cdev->dev), type, priv->protocol);
1361
1362         ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1363         if (ch == NULL)
1364                 return -ENOMEM;
1365
1366         ch->protocol = priv->protocol;
1367         if (IS_MPC(priv)) {
1368                 ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
1369                 if (ch->discontact_th == NULL)
1370                                         goto nomem_return;
1371
1372                 ch->discontact_th->th_blk_flag = TH_DISCONTACT;
1373                 tasklet_init(&ch->ch_disc_tasklet,
1374                         mpc_action_send_discontact, (unsigned long)ch);
1375
1376                 tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
1377                 ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
1378                 ccw_num = 17;
1379         } else
1380                 ccw_num = 8;
1381
1382         ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
1383         if (ch->ccw == NULL)
1384                                         goto nomem_return;
1385
1386         ch->cdev = cdev;
1387         snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
1388         ch->type = type;
1389
1390         /*
1391          * The "static" ccws are used in the following way:
1392          *
1393          * ccw[0..2] (channel program for generic I/O):
1394          *           0: prepare
1395          *           1: read or write (depending on direction) with fixed
1396          *              buffer (idal allocated once when buffer is allocated)
1397          *           2: nop
1398          * ccw[3..5] (channel program for direct write of packets):
1399          *           3: prepare
1400          *           4: write (idal allocated on every write)
1401          *           5: nop
1402          * ccw[6..7] (channel program for initial channel setup):
1403          *           6: set extended mode
1404          *           7: nop
1405          *
1406          * ch->ccw[0..5] are initialized in ch_action_start because
1407          * the channel's direction is not known yet at this point.
1408          *
1409          * ccws used for XID2 negotiation:
1410          * ch->ccw[8..14] are used for the XID exchange, either as
1411          *   X side XID2 processing:
1412          *           8:  write control
1413          *           9:  write th
1414          *           10: write XID
1415          *           11: read th from secondary
1416          *           12: read XID from secondary
1417          *           13: read 4 byte ID
1418          *           14: nop
1419          *   Y side XID2 processing:
1420          *           8:  sense
1421          *           9:  read th
1422          *           10: read XID
1423          *           11: write th
1424          *           12: write XID
1425          *           13: write 4 byte ID
1426          *           14: nop
1427          *
1428          * ccws used for a double nop, needed due to VM timing issues
1429          * which otherwise result in an unrecoverable Busy on the channel:
1430          *           15: nop
1431          *           16: nop
1432          */
1433         ch->ccw[6].cmd_code     = CCW_CMD_SET_EXTENDED;
1434         ch->ccw[6].flags        = CCW_FLAG_SLI;
1435
1436         ch->ccw[7].cmd_code     = CCW_CMD_NOOP;
1437         ch->ccw[7].flags        = CCW_FLAG_SLI;
1438
1439         if (IS_MPC(priv)) {
1440                 ch->ccw[15].cmd_code = CCW_CMD_WRITE;
1441                 ch->ccw[15].flags    = CCW_FLAG_SLI | CCW_FLAG_CC;
1442                 ch->ccw[15].count    = TH_HEADER_LENGTH;
1443                 ch->ccw[15].cda      = virt_to_phys(ch->discontact_th);
1444
1445                 ch->ccw[16].cmd_code = CCW_CMD_NOOP;
1446                 ch->ccw[16].flags    = CCW_FLAG_SLI;
1447
1448                 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1449                                 ctc_ch_event_names, CTC_MPC_NR_STATES,
1450                                 CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
1451                                 mpc_ch_fsm_len, GFP_KERNEL);
1452         } else {
1453                 ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
1454                                 ctc_ch_event_names, CTC_NR_STATES,
1455                                 CTC_NR_EVENTS, ch_fsm,
1456                                 ch_fsm_len, GFP_KERNEL);
1457         }
1458         if (ch->fsm == NULL)
1459                 goto nomem_return;
1460
1461         fsm_newstate(ch->fsm, CTC_STATE_IDLE);
1462
1463         ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
1464         if (ch->irb == NULL)
1465                 goto nomem_return;
1466
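             /*
              * Insert the new channel into the list, which is kept sorted
              * by id; if a channel with the same id already exists, reuse
              * the old entry and free the one just built.
              */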
1467         while (*c && ctcm_less_than((*c)->id, ch->id))
1468                 c = &(*c)->next;
1469
1470         if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
1471                 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1472                                 "%s (%s) already in list, using old entry",
1473                                 __func__, (*c)->id);
1474
1475                 goto free_return;
1476         }
1477
1478         spin_lock_init(&ch->collect_lock);
1479
1480         fsm_settimer(ch->fsm, &ch->timer);
1481         skb_queue_head_init(&ch->io_queue);
1482         skb_queue_head_init(&ch->collect_queue);
1483
1484         if (IS_MPC(priv)) {
1485                 fsm_settimer(ch->fsm, &ch->sweep_timer);
1486                 skb_queue_head_init(&ch->sweep_queue);
1487         }
1488         ch->next = *c;
1489         *c = ch;
1490         return 0;
1491
1492 nomem_return:
1493         rc = -ENOMEM;
1494
1495 free_return:    /* note that all channel pointers are either NULL or valid */
1496         kfree(ch->ccw);
1497         kfree(ch->discontact_th);
1498         kfree_fsm(ch->fsm);
1499         kfree(ch->irb);
1500         kfree(ch);
1501         return rc;
1502 }
1503
1504 /*
1505  * Return type of a detected device.
1506  */
1507 static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
1508 {
1509         enum ctcm_channel_types type;
1510         type = (enum ctcm_channel_types)id->driver_info;
1511
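             /* This driver handles CTC FICON channels like ESCON channels. */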
1512         if (type == ctcm_channel_type_ficon)
1513                 type = ctcm_channel_type_escon;
1514
1515         return type;
1516 }
1517
1518 /**
1519  * Set up an interface: add both channels, set the ccw devices online
1520  * and allocate and register the net device.
1521  *
1522  *  cgdev       Device to be set up.
1523  *
1524  * returns 0 on success, !0 on failure.
1525  */
1526 static int ctcm_new_device(struct ccwgroup_device *cgdev)
1527 {
1528         char read_id[CTCM_ID_SIZE];
1529         char write_id[CTCM_ID_SIZE];
1530         int direction;
1531         enum ctcm_channel_types type;
1532         struct ctcm_priv *priv;
1533         struct net_device *dev;
1534         struct ccw_device *cdev0;
1535         struct ccw_device *cdev1;
1536         struct channel *readc;
1537         struct channel *writec;
1538         int ret;
1539         int result;
1540
1541         priv = dev_get_drvdata(&cgdev->dev);
1542         if (!priv) {
1543                 result = -ENODEV;
1544                 goto out_err_result;
1545         }
1546
1547         cdev0 = cgdev->cdev[0];
1548         cdev1 = cgdev->cdev[1];
1549
1550         type = get_channel_type(&cdev0->id);
1551
1552         snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
1553         snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
1554
1555         ret = add_channel(cdev0, type, priv);
1556         if (ret) {
1557                 result = ret;
1558                 goto out_err_result;
1559         }
1560         ret = add_channel(cdev1, type, priv);
1561         if (ret) {
1562                 result = ret;
1563                 goto out_remove_channel1;
1564         }
1565
1566         ret = ccw_device_set_online(cdev0);
1567         if (ret != 0) {
1568                 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1569                         "%s(%s) set_online rc=%d",
1570                                 CTCM_FUNTAIL, read_id, ret);
1571                 result = -EIO;
1572                 goto out_remove_channel2;
1573         }
1574
1575         ret = ccw_device_set_online(cdev1);
1576         if (ret != 0) {
1577                 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1578                         "%s(%s) set_online rc=%d",
1579                                 CTCM_FUNTAIL, write_id, ret);
1580
1581                 result = -EIO;
1582                 goto out_ccw1;
1583         }
1584
1585         dev = ctcm_init_netdevice(priv);
1586         if (dev == NULL) {
1587                 result = -ENODEV;
1588                 goto out_ccw2;
1589         }
1590
1591         for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
1592                 priv->channel[direction] =
1593                         channel_get(type, direction == CTCM_READ ?
1594                                 read_id : write_id, direction);
1595                 if (priv->channel[direction] == NULL) {
1596                         if (direction == CTCM_WRITE)
1597                                 channel_free(priv->channel[CTCM_READ]);
1598                         goto out_dev;
1599                 }
1600                 priv->channel[direction]->netdev = dev;
1601                 priv->channel[direction]->protocol = priv->protocol;
1602                 priv->channel[direction]->max_bufsize = priv->buffer_size;
1603         }
1604         /* sysfs magic: make the net device a child of the ccwgroup device */
1605         SET_NETDEV_DEV(dev, &cgdev->dev);
1606
1607         if (register_netdev(dev)) {
1608                 result = -ENODEV;
1609                 goto out_dev;
1610         }
1611
1612         strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
1613
1614         dev_info(&dev->dev,
1615                 "setup OK : r/w = %s/%s, protocol : %d\n",
1616                         priv->channel[CTCM_READ]->id,
1617                         priv->channel[CTCM_WRITE]->id, priv->protocol);
1618
1619         CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1620                 "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
1621                         priv->channel[CTCM_READ]->id,
1622                         priv->channel[CTCM_WRITE]->id, priv->protocol);
1623
1624         return 0;
1625 out_dev:
1626         ctcm_free_netdevice(dev);
1627 out_ccw2:
1628         ccw_device_set_offline(cgdev->cdev[1]);
1629 out_ccw1:
1630         ccw_device_set_offline(cgdev->cdev[0]);
1631 out_remove_channel2:
1632         readc = channel_get(type, read_id, CTCM_READ);
1633         channel_remove(readc);
1634 out_remove_channel1:
1635         writec = channel_get(type, write_id, CTCM_WRITE);
1636         channel_remove(writec);
1637 out_err_result:
1638         return result;
1639 }
1640
1641 /**
1642  * Shut down an interface.
1643  *
1644  *  cgdev       Device to be shut down.
1645  *
1646  * returns 0 on success, !0 on failure.
1647  */
1648 static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
1649 {
1650         struct ctcm_priv *priv;
1651         struct net_device *dev;
1652
1653         priv = dev_get_drvdata(&cgdev->dev);
1654         if (!priv)
1655                 return -ENODEV;
1656
1657         if (priv->channel[CTCM_READ]) {
1658                 dev = priv->channel[CTCM_READ]->netdev;
1659                 CTCM_DBF_DEV(SETUP, dev, "");
1660                 /* Close the device */
1661                 ctcm_close(dev);
1662                 dev->flags &= ~IFF_RUNNING;
1663                 channel_free(priv->channel[CTCM_READ]);
1664         } else
1665                 dev = NULL;
1666
1667         if (priv->channel[CTCM_WRITE])
1668                 channel_free(priv->channel[CTCM_WRITE]);
1669
1670         if (dev) {
1671                 unregister_netdev(dev);
1672                 ctcm_free_netdevice(dev);
1673         }
1674
1675         if (priv->fsm)
1676                 kfree_fsm(priv->fsm);
1677
1678         ccw_device_set_offline(cgdev->cdev[1]);
1679         ccw_device_set_offline(cgdev->cdev[0]);
1680
1681         if (priv->channel[CTCM_READ])
1682                 channel_remove(priv->channel[CTCM_READ]);
1683         if (priv->channel[CTCM_WRITE])
1684                 channel_remove(priv->channel[CTCM_WRITE]);
1685         priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
1686
1687         return 0;
1688
1689 }
1690
1691
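     /*
      * Remove a ctcm group device: shut the interface down if it is still
      * online, free the private data and drop the device reference taken
      * in ctcm_probe_device.
      */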
1692 static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1693 {
1694         struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
1695
1696         CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1697                         "removing device %p, proto : %d",
1698                         cgdev, priv->protocol);
1699
1700         if (cgdev->state == CCWGROUP_ONLINE)
1701                 ctcm_shutdown_device(cgdev);
1702         dev_set_drvdata(&cgdev->dev, NULL);
1703         kfree(priv);
1704         put_device(&cgdev->dev);
1705 }
1706
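     /*
      * Suspend (hibernation freeze) callback: detach and close the net
      * device, wait for the interface FSM to reach the stopped state and
      * set both ccw devices offline. Returns -EBUSY if the interface does
      * not stop within the timeout.
      */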
1707 static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1708 {
1709         struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
1710
1711         if (gdev->state == CCWGROUP_OFFLINE)
1712                 return 0;
1713         netif_device_detach(priv->channel[CTCM_READ]->netdev);
1714         ctcm_close(priv->channel[CTCM_READ]->netdev);
1715         if (!wait_event_timeout(priv->fsm->wait_q,
1716             fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1717                 netif_device_attach(priv->channel[CTCM_READ]->netdev);
1718                 return -EBUSY;
1719         }
1720         ccw_device_set_offline(gdev->cdev[1]);
1721         ccw_device_set_offline(gdev->cdev[0]);
1722         return 0;
1723 }
1724
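     /*
      * Resume (thaw/restore) callback: set both ccw devices online again,
      * reopen the interface and re-attach the net device.
      */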
1725 static int ctcm_pm_resume(struct ccwgroup_device *gdev)
1726 {
1727         struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
1728         int rc;
1729
1730         if (gdev->state == CCWGROUP_OFFLINE)
1731                 return 0;
1732         rc = ccw_device_set_online(gdev->cdev[1]);
1733         if (rc)
1734                 goto err_out;
1735         rc = ccw_device_set_online(gdev->cdev[0]);
1736         if (rc)
1737                 goto err_out;
1738         ctcm_open(priv->channel[CTCM_READ]->netdev);
1739 err_out:
1740         netif_device_attach(priv->channel[CTCM_READ]->netdev);
1741         return rc;
1742 }
1743
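     /*
      * CCW devices served by this driver: 3088 control units in parallel
      * CTC, CTC FICON and ESCON mode. The channel type is carried in
      * driver_info (see get_channel_type above).
      */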
1744 static struct ccw_device_id ctcm_ids[] = {
1745         {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
1746         {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
1747         {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
1748         {},
1749 };
1750 MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1751
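     /* ccw driver bound to the individual channel (read/write) devices */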
1752 static struct ccw_driver ctcm_ccw_driver = {
1753         .driver = {
1754                 .owner  = THIS_MODULE,
1755                 .name   = "ctcm",
1756         },
1757         .ids    = ctcm_ids,
1758         .probe  = ccwgroup_probe_ccwdev,
1759         .remove = ccwgroup_remove_ccwdev,
1760         .int_class = IRQIO_CTC,
1761 };
1762
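     /*
      * ccwgroup driver that combines a read and a write channel into
      * one ctcm network interface.
      */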
1763 static struct ccwgroup_driver ctcm_group_driver = {
1764         .driver = {
1765                 .owner  = THIS_MODULE,
1766                 .name   = CTC_DRIVER_NAME,
1767         },
1768         .setup       = ctcm_probe_device,
1769         .remove      = ctcm_remove_device,
1770         .set_online  = ctcm_new_device,
1771         .set_offline = ctcm_shutdown_device,
1772         .freeze      = ctcm_pm_suspend,
1773         .thaw        = ctcm_pm_resume,
1774         .restore     = ctcm_pm_resume,
1775 };
1776
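     /*
      * Driver-level sysfs "group" attribute: writing the bus ids of a
      * read/write channel pair creates a new ctcm group device, e.g.
      * (the bus ids below are only an example):
      *
      *   echo 0.0.f000,0.0.f001 > /sys/bus/ccwgroup/drivers/ctcm/group
      */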
1777 static ssize_t ctcm_driver_group_store(struct device_driver *ddrv,
1778                                        const char *buf, size_t count)
1779 {
1780         int err;
1781
1782         err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf);
1783         return err ? err : count;
1784 }
1785 static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1786
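     /* sysfs attributes attached to the driver core object ("group" above) */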
1787 static struct attribute *ctcm_drv_attrs[] = {
1788         &driver_attr_group.attr,
1789         NULL,
1790 };
1791 static struct attribute_group ctcm_drv_attr_group = {
1792         .attrs = ctcm_drv_attrs,
1793 };
1794 static const struct attribute_group *ctcm_drv_attr_groups[] = {
1795         &ctcm_drv_attr_group,
1796         NULL,
1797 };
1798
1799 /*
1800  * Module related routines
1801  */
1802
1803 /*
1804  * Prepare to be unloaded. Free IRQs and release all resources.
1805  * This is called just before this module is unloaded. It is
1806  * not called if the usage count is not 0, so we don't need to check
1807  * for that.
1808  */
1809 static void __exit ctcm_exit(void)
1810 {
1811         ccwgroup_driver_unregister(&ctcm_group_driver);
1812         ccw_driver_unregister(&ctcm_ccw_driver);
1813         root_device_unregister(ctcm_root_dev);
1814         ctcm_unregister_dbf_views();
1815         pr_info("CTCM driver unloaded\n");
1816 }
1817
1818 /*
1819  * Print Banner.
1820  */
1821 static void print_banner(void)
1822 {
1823         pr_info("CTCM driver initialized\n");
1824 }
1825
1826 /**
1827  * Initialize module.
1828  * This is called just after the module is loaded.
1829  *
1830  * returns 0 on success, !0 on error.
1831  */
1832 static int __init ctcm_init(void)
1833 {
1834         int ret;
1835
1836         channels = NULL;
1837
1838         ret = ctcm_register_dbf_views();
1839         if (ret)
1840                 goto out_err;
1841         ctcm_root_dev = root_device_register("ctcm");
1842         ret = PTR_ERR_OR_ZERO(ctcm_root_dev);
1843         if (ret)
1844                 goto register_err;
1845         ret = ccw_driver_register(&ctcm_ccw_driver);
1846         if (ret)
1847                 goto ccw_err;
1848         ctcm_group_driver.driver.groups = ctcm_drv_attr_groups;
1849         ret = ccwgroup_driver_register(&ctcm_group_driver);
1850         if (ret)
1851                 goto ccwgroup_err;
1852         print_banner();
1853         return 0;
1854
1855 ccwgroup_err:
1856         ccw_driver_unregister(&ctcm_ccw_driver);
1857 ccw_err:
1858         root_device_unregister(ctcm_root_dev);
1859 register_err:
1860         ctcm_unregister_dbf_views();
1861 out_err:
1862         pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1863                 __func__, ret);
1864         return ret;
1865 }
1866
1867 module_init(ctcm_init);
1868 module_exit(ctcm_exit);
1869
1870 MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
1871 MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
1872 MODULE_LICENSE("GPL");
1873