/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/socklnd/socklnd.c
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "socklnd.h"

static lnd_t the_ksocklnd;
ksock_nal_data_t ksocknal_data;

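/*
 * Look up the local interface with address 'ip' in this NI's interface
 * table; returns NULL if no configured interface matches.
 */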
static ksock_interface_t *
ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
{
        ksock_net_t       *net = ni->ni_data;
        int             i;
        ksock_interface_t *iface;

        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT(i < LNET_MAX_INTERFACES);
                iface = &net->ksnn_interfaces[i];

                if (iface->ksni_ipaddr == ip)
                        return iface;
        }

        return NULL;
}

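/*
 * Allocate and initialise a route to 'ipaddr':'port'.  The route starts
 * with a single reference owned by the caller and is not yet attached
 * to any peer.
 */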
static ksock_route_t *
ksocknal_create_route(__u32 ipaddr, int port)
{
        ksock_route_t *route;

        LIBCFS_ALLOC(route, sizeof(*route));
        if (route == NULL)
                return NULL;

        atomic_set(&route->ksnr_refcount, 1);
        route->ksnr_peer = NULL;
        route->ksnr_retry_interval = 0;  /* OK to connect at any time */
        route->ksnr_ipaddr = ipaddr;
        route->ksnr_port = port;
        route->ksnr_scheduled = 0;
        route->ksnr_connecting = 0;
        route->ksnr_connected = 0;
        route->ksnr_deleted = 0;
        route->ksnr_conn_count = 0;
        route->ksnr_share_count = 0;

        return route;
}

void
ksocknal_destroy_route(ksock_route_t *route)
{
        LASSERT(atomic_read(&route->ksnr_refcount) == 0);

        if (route->ksnr_peer != NULL)
                ksocknal_peer_decref(route->ksnr_peer);

        LIBCFS_FREE(route, sizeof(*route));
}

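/*
 * Allocate and initialise a peer for 'id' on 'ni'.  On success *peerp is
 * set with one reference held for the caller; fails with -ENOMEM on
 * allocation failure or -ESHUTDOWN if the network is shutting down.
 */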
static int
ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_net_t   *net = ni->ni_data;
        ksock_peer_t  *peer;

        LASSERT(id.nid != LNET_NID_ANY);
        LASSERT(id.pid != LNET_PID_ANY);
        LASSERT(!in_interrupt());

        LIBCFS_ALLOC(peer, sizeof(*peer));
        if (peer == NULL)
                return -ENOMEM;

        peer->ksnp_ni = ni;
        peer->ksnp_id = id;
        atomic_set(&peer->ksnp_refcount, 1);   /* 1 ref for caller */
        peer->ksnp_closing = 0;
        peer->ksnp_accepting = 0;
        peer->ksnp_proto = NULL;
        peer->ksnp_last_alive = 0;
        peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

        INIT_LIST_HEAD(&peer->ksnp_conns);
        INIT_LIST_HEAD(&peer->ksnp_routes);
        INIT_LIST_HEAD(&peer->ksnp_tx_queue);
        INIT_LIST_HEAD(&peer->ksnp_zc_req_list);
        spin_lock_init(&peer->ksnp_lock);

        spin_lock_bh(&net->ksnn_lock);

        if (net->ksnn_shutdown) {
                spin_unlock_bh(&net->ksnn_lock);

                LIBCFS_FREE(peer, sizeof(*peer));
                CERROR("Can't create peer: network shutdown\n");
                return -ESHUTDOWN;
        }

        net->ksnn_npeers++;

        spin_unlock_bh(&net->ksnn_lock);

        *peerp = peer;
        return 0;
}

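/*
 * Free a peer whose refcount has dropped to zero; by this point it must
 * have no connections, routes or queued TXs left.
 */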
void
ksocknal_destroy_peer(ksock_peer_t *peer)
{
        ksock_net_t    *net = peer->ksnp_ni->ni_data;

        CDEBUG(D_NET, "peer %s %p deleted\n",
                libcfs_id2str(peer->ksnp_id), peer);

        LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
        LASSERT(peer->ksnp_accepting == 0);
        LASSERT(list_empty(&peer->ksnp_conns));
        LASSERT(list_empty(&peer->ksnp_routes));
        LASSERT(list_empty(&peer->ksnp_tx_queue));
        LASSERT(list_empty(&peer->ksnp_zc_req_list));

        LIBCFS_FREE(peer, sizeof(*peer));

        /* NB a peer's connections and routes keep a reference on their peer
         * until they are destroyed, so we can be assured that _all_ state to
         * do with this peer has been cleaned up when its refcount drops to
         * zero. */
        spin_lock_bh(&net->ksnn_lock);
        net->ksnn_npeers--;
        spin_unlock_bh(&net->ksnn_lock);
}

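/*
 * Look up the peer matching 'id' on 'ni' in the global peer hash table.
 * Caller must hold ksnd_global_lock; no reference is taken on the result
 * (ksocknal_find_peer() below is the refcounting variant).
 */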
ksock_peer_t *
ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
{
        struct list_head       *peer_list = ksocknal_nid2peerlist(id.nid);
        struct list_head       *tmp;
        ksock_peer_t     *peer;

        list_for_each(tmp, peer_list) {

                peer = list_entry(tmp, ksock_peer_t, ksnp_list);

                LASSERT(!peer->ksnp_closing);

                if (peer->ksnp_ni != ni)
                        continue;

                if (peer->ksnp_id.nid != id.nid ||
                    peer->ksnp_id.pid != id.pid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d)\n",
                       peer, libcfs_id2str(id),
                       atomic_read(&peer->ksnp_refcount));
                return peer;
        }
        return NULL;
}

ksock_peer_t *
ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
{
        ksock_peer_t     *peer;

        read_lock(&ksocknal_data.ksnd_global_lock);
        peer = ksocknal_find_peer_locked(ni, id);
        if (peer != NULL)                       /* +1 ref for caller? */
                ksocknal_peer_addref(peer);
        read_unlock(&ksocknal_data.ksnd_global_lock);

        return peer;
}

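/*
 * Remove 'peer' from the peer hash table: release its claims on the
 * passive interfaces it was using, mark it closing and drop the table's
 * reference on it.
 */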
static void
ksocknal_unlink_peer_locked(ksock_peer_t *peer)
{
        int             i;
        __u32         ip;
        ksock_interface_t *iface;

        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
                LASSERT(i < LNET_MAX_INTERFACES);
                ip = peer->ksnp_passive_ips[i];

                iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
                /* All IPs in peer->ksnp_passive_ips[] come from the
                 * interface list, therefore the call must succeed. */
                LASSERT(iface != NULL);

                CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
                       peer, iface, iface->ksni_nroutes);
                iface->ksni_npeers--;
        }

        LASSERT(list_empty(&peer->ksnp_conns));
        LASSERT(list_empty(&peer->ksnp_routes));
        LASSERT(!peer->ksnp_closing);
        peer->ksnp_closing = 1;
        list_del(&peer->ksnp_list);
        /* lose peerlist's ref */
        ksocknal_peer_decref(peer);
}

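/*
 * Walk the peer table and report the 'index'th entry for this NI: the
 * peer id plus, where present, the local/remote address, port, connection
 * count and share count of one of its passive IPs or routes.  Returns
 * -ENOENT when 'index' is past the last entry.
 */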
static int
ksocknal_get_peer_info(lnet_ni_t *ni, int index,
                        lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
                        int *port, int *conn_count, int *share_count)
{
        ksock_peer_t      *peer;
        struct list_head        *ptmp;
        ksock_route_t     *route;
        struct list_head        *rtmp;
        int             i;
        int             j;
        int             rc = -ENOENT;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {

                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);

                        if (peer->ksnp_ni != ni)
                                continue;

                        if (peer->ksnp_n_passive_ips == 0 &&
                            list_empty(&peer->ksnp_routes)) {
                                if (index-- > 0)
                                        continue;

                                *id = peer->ksnp_id;
                                *myip = 0;
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        for (j = 0; j < peer->ksnp_n_passive_ips; j++) {
                                if (index-- > 0)
                                        continue;

                                *id = peer->ksnp_id;
                                *myip = peer->ksnp_passive_ips[j];
                                *peer_ip = 0;
                                *port = 0;
                                *conn_count = 0;
                                *share_count = 0;
                                rc = 0;
                                goto out;
                        }

                        list_for_each(rtmp, &peer->ksnp_routes) {
                                if (index-- > 0)
                                        continue;

                                route = list_entry(rtmp, ksock_route_t,
                                                       ksnr_list);

                                *id = peer->ksnp_id;
                                *myip = route->ksnr_myipaddr;
                                *peer_ip = route->ksnr_ipaddr;
                                *port = route->ksnr_port;
                                *conn_count = route->ksnr_conn_count;
                                *share_count = route->ksnr_share_count;
                                rc = 0;
                                goto out;
                        }
                }
        }
 out:
        read_unlock(&ksocknal_data.ksnd_global_lock);
        return rc;
}

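/*
 * Bind 'conn' to 'route': record which local interface the connection
 * actually used, adjust the per-interface route counts accordingly, mark
 * the route connected for this connection type and reset its retry
 * interval.
 */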
static void
ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
{
        ksock_peer_t      *peer = route->ksnr_peer;
        int             type = conn->ksnc_type;
        ksock_interface_t *iface;

        conn->ksnc_route = route;
        ksocknal_route_addref(route);

        if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
                if (route->ksnr_myipaddr == 0) {
                        /* route wasn't bound locally yet (the initial route) */
                        CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
                               libcfs_id2str(peer->ksnp_id),
                               &route->ksnr_ipaddr,
                               &conn->ksnc_myipaddr);
                } else {
                        CDEBUG(D_NET, "Rebinding %s %pI4h from %pI4h to %pI4h\n",
                               libcfs_id2str(peer->ksnp_id),
                               &route->ksnr_ipaddr,
                               &route->ksnr_myipaddr,
                               &conn->ksnc_myipaddr);

                        iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                                  route->ksnr_myipaddr);
                        if (iface != NULL)
                                iface->ksni_nroutes--;
                }
                route->ksnr_myipaddr = conn->ksnc_myipaddr;
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes++;
        }

        route->ksnr_connected |= (1<<type);
        route->ksnr_conn_count++;

        /* Successful connection => further attempts can
         * proceed immediately */
        route->ksnr_retry_interval = 0;
}

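/*
 * Attach a newly created route to 'peer' (the peer's route list takes
 * over the caller's reference) and associate it with any existing
 * connections to the same remote address.  LBUGs if a route to that
 * address already exists.
 */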
static void
ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
{
        struct list_head        *tmp;
        ksock_conn_t      *conn;
        ksock_route_t     *route2;

        LASSERT(!peer->ksnp_closing);
        LASSERT(route->ksnr_peer == NULL);
        LASSERT(!route->ksnr_scheduled);
        LASSERT(!route->ksnr_connecting);
        LASSERT(route->ksnr_connected == 0);

        /* LASSERT(unique) */
        list_for_each(tmp, &peer->ksnp_routes) {
                route2 = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR("Duplicate route %s %pI4h\n",
                                libcfs_id2str(peer->ksnp_id),
                                &route->ksnr_ipaddr);
                        LBUG();
                }
        }

        route->ksnr_peer = peer;
        ksocknal_peer_addref(peer);
        /* peer's routelist takes over my ref on 'route' */
        list_add_tail(&route->ksnr_list, &peer->ksnp_routes);

        list_for_each(tmp, &peer->ksnp_conns) {
                conn = list_entry(tmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                /* keep going (typed routes) */
        }
}

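/*
 * Delete a route: close any connections using it, release its claim on
 * the local interface, unlink it from its peer, and unlink the peer too
 * if this was its last route and it has no connections left.
 */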
static void
ksocknal_del_route_locked(ksock_route_t *route)
{
        ksock_peer_t      *peer = route->ksnr_peer;
        ksock_interface_t *iface;
        ksock_conn_t      *conn;
        struct list_head        *ctmp;
        struct list_head        *cnxt;

        LASSERT(!route->ksnr_deleted);

        /* Close associated conns */
        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
                conn = list_entry(ctmp, ksock_conn_t, ksnc_list);

                if (conn->ksnc_route != route)
                        continue;

                ksocknal_close_conn_locked(conn, 0);
        }

        if (route->ksnr_myipaddr != 0) {
                iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
                                          route->ksnr_myipaddr);
                if (iface != NULL)
                        iface->ksni_nroutes--;
        }

        route->ksnr_deleted = 1;
        list_del(&route->ksnr_list);
        ksocknal_route_decref(route);        /* drop peer's ref */

        if (list_empty(&peer->ksnp_routes) &&
            list_empty(&peer->ksnp_conns)) {
                /* I've just removed the last route to a peer with no active
                 * connections */
                ksocknal_unlink_peer_locked(peer);
        }
}

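/*
 * Add (or share) an explicitly configured peer at 'ipaddr':'port'.
 * Reuses an existing peer/route when one matches, otherwise inserts the
 * freshly created ones, and bumps the route's share count either way.
 */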
int
ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
{
        struct list_head        *tmp;
        ksock_peer_t      *peer;
        ksock_peer_t      *peer2;
        ksock_route_t     *route;
        ksock_route_t     *route2;
        int             rc;

        if (id.nid == LNET_NID_ANY ||
            id.pid == LNET_PID_ANY)
                return -EINVAL;

        /* Have a brand new peer ready... */
        rc = ksocknal_create_peer(&peer, ni, id);
        if (rc != 0)
                return rc;

        route = ksocknal_create_route(ipaddr, port);
        if (route == NULL) {
                ksocknal_peer_decref(peer);
                return -ENOMEM;
        }

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        /* always called with a ref on ni, so shutdown can't have started */
        LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2 != NULL) {
                ksocknal_peer_decref(peer);
                peer = peer2;
        } else {
                /* peer table takes my ref on peer */
                list_add_tail(&peer->ksnp_list,
                                   ksocknal_nid2peerlist(id.nid));
        }

        route2 = NULL;
        list_for_each(tmp, &peer->ksnp_routes) {
                route2 = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route2->ksnr_ipaddr == ipaddr)
                        break;

                route2 = NULL;
        }
        if (route2 == NULL) {
                ksocknal_add_route_locked(peer, route);
                route->ksnr_share_count++;
        } else {
                ksocknal_route_decref(route);
                route2->ksnr_share_count++;
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        return 0;
}

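/*
 * Delete routes to 'peer' matching 'ip' (0 matches all).  If no shared
 * (explicitly configured) routes remain afterwards, the remaining
 * auto-created routes and all connections are torn down as well.
 */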
static void
ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
{
        ksock_conn_t     *conn;
        ksock_route_t    *route;
        struct list_head       *tmp;
        struct list_head       *nxt;
        int            nshared;

        LASSERT(!peer->ksnp_closing);

        /* Extra ref prevents peer disappearing until I'm done with it */
        ksocknal_peer_addref(peer);

        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                /* no match */
                if (!(ip == 0 || route->ksnr_ipaddr == ip))
                        continue;

                route->ksnr_share_count = 0;
                /* This deletes associated conns too */
                ksocknal_del_route_locked(route);
        }

        nshared = 0;
        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);
                nshared += route->ksnr_share_count;
        }

        if (nshared == 0) {
                /* remove everything else if there are no explicit entries
                 * left */

                list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
                        route = list_entry(tmp, ksock_route_t, ksnr_list);

                        /* we should only be removing auto-entries */
                        LASSERT(route->ksnr_share_count == 0);
                        ksocknal_del_route_locked(route);
                }

                list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
                        conn = list_entry(tmp, ksock_conn_t, ksnc_list);

                        ksocknal_close_conn_locked(conn, 0);
                }
        }

        ksocknal_peer_decref(peer);
        /* NB peer unlinks itself when last conn/route is removed */
}

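/*
 * Delete all peers on 'ni' matching 'id' and 'ip' (LNET_NID_ANY,
 * LNET_PID_ANY and ip == 0 act as wildcards).  TXs still queued on peers
 * that get unlinked are handed to ksocknal_txlist_done() once the global
 * lock has been dropped.
 */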
static int
ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
{
        LIST_HEAD(zombies);
        struct list_head        *ptmp;
        struct list_head        *pnxt;
        ksock_peer_t      *peer;
        int             lo;
        int             hi;
        int             i;
        int             rc = -ENOENT;

        write_lock_bh(&ksocknal_data.ksnd_global_lock);

        if (id.nid != LNET_NID_ANY)
                lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
        else {
                lo = 0;
                hi = ksocknal_data.ksnd_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt,
                                        &ksocknal_data.ksnd_peers[i]) {
                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);

                        if (peer->ksnp_ni != ni)
                                continue;

                        if (!((id.nid == LNET_NID_ANY || peer->ksnp_id.nid == id.nid) &&
                              (id.pid == LNET_PID_ANY || peer->ksnp_id.pid == id.pid)))
                                continue;

                        ksocknal_peer_addref(peer);     /* a ref for me... */

                        ksocknal_del_peer_locked(peer, ip);

                        if (peer->ksnp_closing &&
                            !list_empty(&peer->ksnp_tx_queue)) {
                                LASSERT(list_empty(&peer->ksnp_conns));
                                LASSERT(list_empty(&peer->ksnp_routes));

                                list_splice_init(&peer->ksnp_tx_queue,
                                                     &zombies);
                        }

                        ksocknal_peer_decref(peer);     /* ...till here */

                        rc = 0;          /* matched! */
                }
        }

        write_unlock_bh(&ksocknal_data.ksnd_global_lock);

        ksocknal_txlist_done(ni, &zombies, 1);

        return rc;
}

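/*
 * Return the 'index'th connection on 'ni' with a reference held for the
 * caller, or NULL if there are not that many connections.
 */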
static ksock_conn_t *
ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
{
        ksock_peer_t      *peer;
        struct list_head        *ptmp;
        ksock_conn_t      *conn;
        struct list_head        *ctmp;
        int             i;

        read_lock(&ksocknal_data.ksnd_global_lock);

        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
                        peer = list_entry(ptmp, ksock_peer_t, ksnp_list);

                        LASSERT(!peer->ksnp_closing);

                        if (peer->ksnp_ni != ni)
                                continue;

                        list_for_each(ctmp, &peer->ksnp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, ksock_conn_t,
                                                       ksnc_list);
                                ksocknal_conn_addref(conn);
                                read_unlock(&ksocknal_data.ksnd_global_lock);
                                return conn;
                        }
                }
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return NULL;
}

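/*
 * Pick the scheduler with the fewest connections among the scheduler
 * threads of CPT partition 'cpt'.
 */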
static ksock_sched_t *
ksocknal_choose_scheduler_locked(unsigned int cpt)
{
        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
        ksock_sched_t           *sched;
        int                     i;

        LASSERT(info->ksi_nthreads > 0);

        sched = &info->ksi_scheds[0];
        /*
         * NB: it's safe so far, but info->ksi_nthreads could be changed
         * at runtime when we have dynamic LNet configuration, then we
         * need to take care of this.
         */
        for (i = 1; i < info->ksi_nthreads; i++) {
                if (sched->kss_nconns > info->ksi_scheds[i].kss_nconns)
                        sched = &info->ksi_scheds[i];
        }

        return sched;
}

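/*
 * Copy this NI's interface addresses into 'ipaddrs' and return how many
 * there are; returns 0 when the node has fewer than two interfaces, since
 * interfaces are only offered for additional connections on multi-homed
 * nodes.
 */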
static int
ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
{
        ksock_net_t       *net = ni->ni_data;
        int             i;
        int             nip;

        read_lock(&ksocknal_data.ksnd_global_lock);

        nip = net->ksnn_ninterfaces;
        LASSERT(nip <= LNET_MAX_INTERFACES);

        /* Only offer interfaces for additional connections if I have
         * more than one. */
        if (nip < 2) {
                read_unlock(&ksocknal_data.ksnd_global_lock);
                return 0;
        }

        for (i = 0; i < nip; i++) {
                ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
                LASSERT(ipaddrs[i] != 0);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
        return nip;
}

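/*
 * Return the index of the peer IP in 'ips' that best matches 'iface':
 * prefer an address on the same subnet, then the smallest XOR distance.
 * Zeroed (already used) entries are skipped.
 */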
static int
ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
{
        int   best_netmatch = 0;
        int   best_xor      = 0;
        int   best        = -1;
        int   this_xor;
        int   this_netmatch;
        int   i;

        for (i = 0; i < nips; i++) {
                if (ips[i] == 0)
                        continue;

                this_xor = ips[i] ^ iface->ksni_ipaddr;
                this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;

                if (!(best < 0 ||
                      best_netmatch < this_netmatch ||
                      (best_netmatch == this_netmatch &&
                       best_xor > this_xor)))
                        continue;

                best = i;
                best_netmatch = this_netmatch;
                best_xor = this_xor;
        }

        LASSERT(best >= 0);
        return best;
}

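/*
 * Select which local interfaces to advertise to this peer (recording
 * them as the peer's passive IPs), pairing each with its best-matching
 * peer IP, then overwrite 'peerips' with the chosen local addresses.
 * Returns the number of addresses selected.
 */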
static int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        ksock_net_t     *net = peer->ksnp_ni->ni_data;
        ksock_interface_t  *iface;
        ksock_interface_t  *best_iface;
        int              n_ips;
        int              i;
        int              j;
        int              k;
        __u32          ip;
        __u32          xor;
        int              this_netmatch;
        int              best_netmatch;
        int              best_npeers;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness shouldn't matter */

        /* Also note that I'm not going to return more than n_peerips
         * interfaces, even if I have more myself */

        write_lock_bh(global_lock);

        LASSERT(n_peerips <= LNET_MAX_INTERFACES);
        LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

        /* Only match interfaces for additional connections
         * if I have > 1 interface */
        n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
                min(n_peerips, net->ksnn_ninterfaces);

        for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
                /*            ^ yes really... */

                /* If we have any new interfaces, first tick off all the
                 * peer IPs that match old interfaces, then choose new
                 * interfaces to match the remaining peer IPs.
                 * We don't forget interfaces we've stopped using; we might
                 * start using them again... */

                if (i < peer->ksnp_n_passive_ips) {
                        /* Old interface. */
                        ip = peer->ksnp_passive_ips[i];
                        best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);

                } else {
                        /* choose a new interface */
                        LASSERT(i == peer->ksnp_n_passive_ips);

                        best_iface = NULL;
                        best_netmatch = 0;
                        best_npeers = 0;

                        for (j = 0; j < net->ksnn_ninterfaces; j++) {
                                iface = &net->ksnn_interfaces[j];
                                ip = iface->ksni_ipaddr;

                                for (k = 0; k < peer->ksnp_n_passive_ips; k++)
                                        if (peer->ksnp_passive_ips[k] == ip)
                                                break;

                                if (k < peer->ksnp_n_passive_ips) /* using it already */
                                        continue;

                                k = ksocknal_match_peerip(iface, peerips, n_peerips);
                                xor = ip ^ peerips[k];
                                this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;

                                if (!(best_iface == NULL ||
                                      best_netmatch < this_netmatch ||
                                      (best_netmatch == this_netmatch &&
                                       best_npeers > iface->ksni_npeers)))
                                        continue;

                                best_iface = iface;
                                best_netmatch = this_netmatch;
                                best_npeers = iface->ksni_npeers;
                        }

                        best_iface->ksni_npeers++;
                        ip = best_iface->ksni_ipaddr;
                        peer->ksnp_passive_ips[i] = ip;
                        peer->ksnp_n_passive_ips = i+1;
                }

                /* mark the best matching peer IP used */
                j = ksocknal_match_peerip(best_iface, peerips, n_peerips);
                peerips[j] = 0;
        }

        /* Overwrite input peer IP addresses */
        memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));

        write_unlock_bh(global_lock);

        return n_ips;
}

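/*
 * For each address the peer advertised, create a route (unless one
 * already exists) and bind it to the best-matching local interface.
 * Only done when this node has more than one interface.
 */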
static void
ksocknal_create_routes(ksock_peer_t *peer, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
{
        ksock_route_t       *newroute = NULL;
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        lnet_ni_t          *ni = peer->ksnp_ni;
        ksock_net_t      *net = ni->ni_data;
        struct list_head          *rtmp;
        ksock_route_t       *route;
        ksock_interface_t   *iface;
        ksock_interface_t   *best_iface;
        int               best_netmatch;
        int               this_netmatch;
        int               best_nroutes;
        int               i;
        int               j;

        /* CAVEAT EMPTOR: We do all our interface matching with an
         * exclusive hold of global lock at IRQ priority.  We're only
         * expecting to be dealing with small numbers of interfaces, so the
         * O(n**3)-ness here shouldn't matter */

        write_lock_bh(global_lock);

        if (net->ksnn_ninterfaces < 2) {
                /* Only create additional connections
                 * if I have > 1 interface */
                write_unlock_bh(global_lock);
                return;
        }

        LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);

        for (i = 0; i < npeer_ipaddrs; i++) {
                if (newroute != NULL) {
                        newroute->ksnr_ipaddr = peer_ipaddrs[i];
                } else {
                        write_unlock_bh(global_lock);

                        newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                        if (newroute == NULL)
                                return;

                        write_lock_bh(global_lock);
                }

                if (peer->ksnp_closing) {
                        /* peer got closed under me */
                        break;
                }

                /* Already got a route? */
                route = NULL;
                list_for_each(rtmp, &peer->ksnp_routes) {
                        route = list_entry(rtmp, ksock_route_t, ksnr_list);

                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;

                        route = NULL;
                }
                if (route != NULL)
                        continue;

                best_iface = NULL;
                best_nroutes = 0;
                best_netmatch = 0;

                LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);

                /* Select interface to connect from */
                for (j = 0; j < net->ksnn_ninterfaces; j++) {
                        iface = &net->ksnn_interfaces[j];

                        /* Using this interface already? */
                        list_for_each(rtmp, &peer->ksnp_routes) {
                                route = list_entry(rtmp, ksock_route_t,
                                                       ksnr_list);

                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
                                        break;

                                route = NULL;
                        }
                        if (route != NULL)
                                continue;

                        this_netmatch = (((iface->ksni_ipaddr ^
                                           newroute->ksnr_ipaddr) &
                                           iface->ksni_netmask) == 0) ? 1 : 0;

                        if (!(best_iface == NULL ||
                              best_netmatch < this_netmatch ||
                              (best_netmatch == this_netmatch &&
                               best_nroutes > iface->ksni_nroutes)))
                                continue;

                        best_iface = iface;
                        best_netmatch = this_netmatch;
                        best_nroutes = iface->ksni_nroutes;
                }

                if (best_iface == NULL)
                        continue;

                newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
                best_iface->ksni_nroutes++;

                ksocknal_add_route_locked(peer, newroute);
                newroute = NULL;
        }

        write_unlock_bh(global_lock);
        if (newroute != NULL)
                ksocknal_route_decref(newroute);
}

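/*
 * Queue an incoming connection request on ksnd_connd_connreqs and wake a
 * connd thread to deal with it; the acceptor itself does no further work
 * on the socket here.
 */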
int
ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
{
        ksock_connreq_t    *cr;
        int              rc;
        __u32          peer_ip;
        int              peer_port;

        rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
        LASSERT(rc == 0);                     /* we succeeded before */

        LIBCFS_ALLOC(cr, sizeof(*cr));
        if (cr == NULL) {
                LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
                                   &peer_ip);
                return -ENOMEM;
        }

        lnet_ni_addref(ni);
        cr->ksncr_ni   = ni;
        cr->ksncr_sock = sock;

        spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

        list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
        wake_up(&ksocknal_data.ksnd_connd_waitq);

        spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
        return 0;
}

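/*
 * Return whether a connection attempt to 'ipaddr' is already in progress
 * for this peer.
 */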
static int
ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
{
        ksock_route_t   *route;

        list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {

                if (route->ksnr_ipaddr == ipaddr)
                        return route->ksnr_connecting;
        }
        return 0;
}

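/*
 * Complete the setup of a new connection on 'sock', for both the active
 * (route != NULL) and passive (incoming) cases: exchange HELLOs, find or
 * create the peer, resolve connection races and duplicates, attach the
 * conn to a scheduler and enable the socket callbacks.
 */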
int
ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
                      struct socket *sock, int type)
{
        rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
        LIST_HEAD(zombies);
        lnet_process_id_t  peerid;
        struct list_head        *tmp;
        __u64         incarnation;
        ksock_conn_t      *conn;
        ksock_conn_t      *conn2;
        ksock_peer_t      *peer = NULL;
        ksock_peer_t      *peer2;
        ksock_sched_t     *sched;
        ksock_hello_msg_t *hello;
        int                cpt;
        ksock_tx_t      *tx;
        ksock_tx_t      *txtmp;
        int             rc;
        int             active;
        char          *warn = NULL;

        active = (route != NULL);

        LASSERT(active == (type != SOCKLND_CONN_NONE));

        LIBCFS_ALLOC(conn, sizeof(*conn));
        if (conn == NULL) {
                rc = -ENOMEM;
                goto failed_0;
        }

        conn->ksnc_peer = NULL;
        conn->ksnc_route = NULL;
        conn->ksnc_sock = sock;
        /* 2 ref, 1 for conn, another extra ref prevents socket
         * being closed before establishment of connection */
        atomic_set(&conn->ksnc_sock_refcount, 2);
        conn->ksnc_type = type;
        ksocknal_lib_save_callback(sock, conn);
        atomic_set(&conn->ksnc_conn_refcount, 1); /* 1 ref for me */

        conn->ksnc_rx_ready = 0;
        conn->ksnc_rx_scheduled = 0;

        INIT_LIST_HEAD(&conn->ksnc_tx_queue);
        conn->ksnc_tx_ready = 0;
        conn->ksnc_tx_scheduled = 0;
        conn->ksnc_tx_carrier = NULL;
        atomic_set(&conn->ksnc_tx_nob, 0);

        LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
                                     kshm_ips[LNET_MAX_INTERFACES]));
        if (hello == NULL) {
                rc = -ENOMEM;
                goto failed_1;
        }

        /* stash conn's local and remote addrs */
        rc = ksocknal_lib_get_conn_addrs(conn);
        if (rc != 0)
                goto failed_1;

        /* Find out/confirm peer's NID and connection type and get the
         * vector of interfaces she's willing to let me connect to.
         * Passive connections use the listener timeout since the peer sends
         * eagerly */

        if (active) {
                peer = route->ksnr_peer;
                LASSERT(ni == peer->ksnp_ni);

                /* Active connection sends HELLO eagerly */
                hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                peerid = peer->ksnp_id;

                write_lock_bh(global_lock);
                conn->ksnc_proto = peer->ksnp_proto;
                write_unlock_bh(global_lock);

                if (conn->ksnc_proto == NULL) {
                         conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
                         if (*ksocknal_tunables.ksnd_protocol == 2)
                                 conn->ksnc_proto = &ksocknal_protocol_v2x;
                         else if (*ksocknal_tunables.ksnd_protocol == 1)
                                 conn->ksnc_proto = &ksocknal_protocol_v1x;
#endif
                }

                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
                if (rc != 0)
                        goto failed_1;
        } else {
                peerid.nid = LNET_NID_ANY;
                peerid.pid = LNET_PID_ANY;

                /* Passive, get protocol from peer */
                conn->ksnc_proto = NULL;
        }

        rc = ksocknal_recv_hello(ni, conn, hello, &peerid, &incarnation);
        if (rc < 0)
                goto failed_1;

        LASSERT(rc == 0 || active);
        LASSERT(conn->ksnc_proto != NULL);
        LASSERT(peerid.nid != LNET_NID_ANY);

        cpt = lnet_cpt_of_nid(peerid.nid);

        if (active) {
                ksocknal_peer_addref(peer);
                write_lock_bh(global_lock);
        } else {
                rc = ksocknal_create_peer(&peer, ni, peerid);
                if (rc != 0)
                        goto failed_1;

                write_lock_bh(global_lock);

                /* called with a ref on ni, so shutdown can't have started */
                LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);

                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (peer2 == NULL) {
                        /* NB this puts an "empty" peer in the peer
                         * table (which takes my ref) */
                        list_add_tail(&peer->ksnp_list,
                                          ksocknal_nid2peerlist(peerid.nid));
                } else {
                        ksocknal_peer_decref(peer);
                        peer = peer2;
                }

                /* +1 ref for me */
                ksocknal_peer_addref(peer);
                peer->ksnp_accepting++;

                /* Am I already connecting to this guy?  Resolve in
                 * favour of higher NID... */
                if (peerid.nid < ni->ni_nid &&
                    ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
                        rc = EALREADY;
                        warn = "connection race resolution";
                        goto failed_2;
                }
        }

        if (peer->ksnp_closing ||
            (active && route->ksnr_deleted)) {
                /* peer/route got closed under me */
                rc = -ESTALE;
                warn = "peer/route removed";
                goto failed_2;
        }

        if (peer->ksnp_proto == NULL) {
                /* Never connected before.
                 * NB recv_hello may have returned EPROTO to signal my peer
                 * wants a different protocol than the one I asked for.
                 */
                LASSERT(list_empty(&peer->ksnp_conns));

                peer->ksnp_proto = conn->ksnc_proto;
                peer->ksnp_incarnation = incarnation;
        }

        if (peer->ksnp_proto != conn->ksnc_proto ||
            peer->ksnp_incarnation != incarnation) {
                /* Peer rebooted or I've got the wrong protocol version */
                ksocknal_close_peer_conns_locked(peer, 0, 0);

                peer->ksnp_proto = NULL;
                rc = ESTALE;
                warn = peer->ksnp_incarnation != incarnation ?
                       "peer rebooted" :
                       "wrong proto version";
                goto failed_2;
        }

        switch (rc) {
        default:
                LBUG();
        case 0:
                break;
        case EALREADY:
                warn = "lost conn race";
                goto failed_2;
        case EPROTO:
                warn = "retry with different protocol version";
                goto failed_2;
        }

        /* Refuse to duplicate an existing connection, unless this is a
         * loopback connection */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                list_for_each(tmp, &peer->ksnp_conns) {
                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
                            conn2->ksnc_type != conn->ksnc_type)
                                continue;

                        /* Reply on a passive connection attempt so the peer
                         * realises we're connected. */
                        LASSERT(rc == 0);
                        if (!active)
                                rc = EALREADY;

                        warn = "duplicate";
                        goto failed_2;
                }
        }

        /* If the connection created by this route didn't bind to the IP
         * address the route connected to, the connection/route matching
         * code below probably isn't going to work. */
        if (active &&
            route->ksnr_ipaddr != conn->ksnc_ipaddr) {
                CERROR("Route %s %pI4h connected to %pI4h\n",
                       libcfs_id2str(peer->ksnp_id),
                       &route->ksnr_ipaddr,
                       &conn->ksnc_ipaddr);
        }

        /* Search for a route corresponding to the new connection and
         * create an association.  This allows incoming connections created
         * by routes in my peer to match my own route entries so I don't
         * continually create duplicate routes. */
        list_for_each(tmp, &peer->ksnp_routes) {
                route = list_entry(tmp, ksock_route_t, ksnr_list);

                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;

                ksocknal_associate_route_conn_locked(route, conn);
                break;
        }

        conn->ksnc_peer = peer;          /* conn takes my ref on peer */
        peer->ksnp_last_alive = cfs_time_current();
        peer->ksnp_send_keepalive = 0;
        peer->ksnp_error = 0;

        sched = ksocknal_choose_scheduler_locked(cpt);
        sched->kss_nconns++;
        conn->ksnc_scheduler = sched;

        conn->ksnc_tx_last_post = cfs_time_current();
        /* Set the deadline for the outgoing HELLO to drain */
        conn->ksnc_tx_bufnob = sock->sk->sk_wmem_queued;
        conn->ksnc_tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
        mb();   /* order with adding to peer's conn list */

        list_add(&conn->ksnc_list, &peer->ksnp_conns);
        ksocknal_conn_addref(conn);

        ksocknal_new_packet(conn, 0);

        conn->ksnc_zc_capable = ksocknal_lib_zc_capable(conn);

        /* Take packets blocking for this connection. */
        list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
                if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
                                continue;

                list_del(&tx->tx_list);
                ksocknal_queue_tx_locked(tx, conn);
        }

        write_unlock_bh(global_lock);

        /* We've now got a new connection.  Any errors from here on are just
         * like "normal" comms errors and we close the connection normally.
         * NB (a) we still have to send the reply HELLO for passive
         *      connections,
         *    (b) normal I/O on the conn is blocked until I setup and call the
         *      socket callbacks.
         */

        CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
               libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
               &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
               conn->ksnc_port, incarnation, cpt,
               (int)(sched - &sched->kss_info->ksi_scheds[0]));

        if (active) {
                /* additional routes after interface exchange? */
                ksocknal_create_routes(peer, conn->ksnc_port,
                                       hello->kshm_ips, hello->kshm_nips);
        } else {
                hello->kshm_nips = ksocknal_select_ips(peer, hello->kshm_ips,
                                                       hello->kshm_nips);
                rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
        }

        LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
                                    kshm_ips[LNET_MAX_INTERFACES]));

        /* setup the socket AFTER I've received hello (it disables
         * SO_LINGER).  I might call back to the acceptor who may want
         * to send a protocol version response and then close the
         * socket; this ensures the socket only tears down after the
         * response has been sent. */
        if (rc == 0)
                rc = ksocknal_lib_setup_sock(sock);

        write_lock_bh(global_lock);

        /* NB my callbacks block while I hold ksnd_global_lock */
        ksocknal_lib_set_callback(sock, conn);

        if (!active)
                peer->ksnp_accepting--;

        write_unlock_bh(global_lock);

        if (rc != 0) {
                write_lock_bh(global_lock);
                if (!conn->ksnc_closing) {
                        /* could be closed by another thread */
                        ksocknal_close_conn_locked(conn, rc);
                }
                write_unlock_bh(global_lock);
        } else if (ksocknal_connsock_addref(conn) == 0) {
                /* Allow I/O to proceed. */
                ksocknal_read_callback(conn);
                ksocknal_write_callback(conn);
                ksocknal_connsock_decref(conn);
        }

        ksocknal_connsock_decref(conn);
        ksocknal_conn_decref(conn);
        return rc;

 failed_2:
        if (!peer->ksnp_closing &&
            list_empty(&peer->ksnp_conns) &&
            list_empty(&peer->ksnp_routes)) {
                list_add(&zombies, &peer->ksnp_tx_queue);
                list_del_init(&peer->ksnp_tx_queue);
                ksocknal_unlink_peer_locked(peer);
        }

        write_unlock_bh(global_lock);

        if (warn != NULL) {
                if (rc < 0)
                        CERROR("Not creating conn %s type %d: %s\n",
                               libcfs_id2str(peerid), conn->ksnc_type, warn);
                else
                        CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
                              libcfs_id2str(peerid), conn->ksnc_type, warn);
        }

        if (!active) {
                if (rc > 0) {
                        /* Request retry by replying with CONN_NONE
                         * ksnc_proto has been set already */
                        conn->ksnc_type = SOCKLND_CONN_NONE;
                        hello->kshm_nips = 0;
                        ksocknal_send_hello(ni, conn, peerid.nid, hello);
                }

                write_lock_bh(global_lock);
                peer->ksnp_accepting--;
                write_unlock_bh(global_lock);
        }

        ksocknal_txlist_done(ni, &zombies, 1);
        ksocknal_peer_decref(peer);

 failed_1:
        if (hello != NULL)
                LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
                                            kshm_ips[LNET_MAX_INTERFACES]));

        LIBCFS_FREE(conn, sizeof(*conn));

 failed_0:
        libcfs_sock_release(sock);
        return rc;
}

void
ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
{
        /* This just does the immediate housekeeping, and queues the
         * connection for the reaper to terminate.
         * Caller holds ksnd_global_lock exclusively in irq context */
        ksock_peer_t      *peer = conn->ksnc_peer;
        ksock_route_t     *route;
        ksock_conn_t      *conn2;
        struct list_head        *tmp;

        LASSERT(peer->ksnp_error == 0);
        LASSERT(!conn->ksnc_closing);
        conn->ksnc_closing = 1;

        /* ksnd_deathrow_conns takes over peer's ref */
        list_del(&conn->ksnc_list);

        route = conn->ksnc_route;
        if (route != NULL) {
                /* dissociate conn from route... */
                LASSERT(!route->ksnr_deleted);
                LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);

                conn2 = NULL;
                list_for_each(tmp, &peer->ksnp_conns) {
                        conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);

                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
                                break;

                        conn2 = NULL;
                }
                if (conn2 == NULL)
                        route->ksnr_connected &= ~(1 << conn->ksnc_type);

                conn->ksnc_route = NULL;

#if 0      /* irrelevant with only eager routes */
                /* make route least favourite */
                list_del(&route->ksnr_list);
                list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
#endif
                ksocknal_route_decref(route);     /* drop conn's ref on route */
        }

        if (list_empty(&peer->ksnp_conns)) {
                /* No more connections to this peer */

                if (!list_empty(&peer->ksnp_tx_queue)) {
                        ksock_tx_t *tx;

                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);

                        /* throw them to the last connection...,
                         * these TXs will be sent to /dev/null by scheduler */
1450                         list_for_each_entry(tx, &peer->ksnp_tx_queue,
1451                                                 tx_list)
1452                                 ksocknal_tx_prep(conn, tx);
1453
1454                         spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
1455                         list_splice_init(&peer->ksnp_tx_queue,
1456                                              &conn->ksnc_tx_queue);
1457                         spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
1458                 }
1459
1460                 peer->ksnp_proto = NULL;        /* renegotiate protocol version */
1461                 peer->ksnp_error = error;       /* stash last conn close reason */
1462
1463                 if (list_empty(&peer->ksnp_routes)) {
1464                         /* I've just closed last conn belonging to a
1465                          * peer with no routes to it */
1466                         ksocknal_unlink_peer_locked(peer);
1467                 }
1468         }
1469
1470         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1471
1472         list_add_tail(&conn->ksnc_list,
1473                           &ksocknal_data.ksnd_deathrow_conns);
1474         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1475
1476         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1477 }
1478
1479 void
1480 ksocknal_peer_failed(ksock_peer_t *peer)
1481 {
1482         int     notify = 0;
1483         unsigned long last_alive = 0;
1484
1485         /* There has been a connection failure or comms error; but I'll only
1486          * tell LNET I think the peer is dead if it's to another kernel and
1487          * there are no connections or connection attempts in existence. */
1488
1489         read_lock(&ksocknal_data.ksnd_global_lock);
1490
1491         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
1492             list_empty(&peer->ksnp_conns) &&
1493             peer->ksnp_accepting == 0 &&
1494             ksocknal_find_connecting_route_locked(peer) == NULL) {
1495                 notify = 1;
1496                 last_alive = peer->ksnp_last_alive;
1497         }
1498
1499         read_unlock(&ksocknal_data.ksnd_global_lock);
1500
1501         if (notify)
1502                 lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
1503                              last_alive);
1504 }
1505
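/* Abort any zero-copy requests still outstanding on 'conn'.  The socket is
 * already gone, so every matching TX on the peer's ZC request list is marked
 * not-acked, moved to a private list under ksnp_lock, and then released. */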
1506 void
1507 ksocknal_finalize_zcreq(ksock_conn_t *conn)
1508 {
1509         ksock_peer_t     *peer = conn->ksnc_peer;
1510         ksock_tx_t       *tx;
1511         ksock_tx_t       *tmp;
1512         LIST_HEAD(zlist);
1513
1514         /* NB safe to finalize TXs because closing of socket will
1515          * abort all buffered data */
1516         LASSERT(conn->ksnc_sock == NULL);
1517
1518         spin_lock(&peer->ksnp_lock);
1519
1520         list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list, tx_zc_list) {
1521                 if (tx->tx_conn != conn)
1522                         continue;
1523
1524                 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
1525
1526                 tx->tx_msg.ksm_zc_cookies[0] = 0;
1527                 tx->tx_zc_aborted = 1; /* mark it as not-acked */
1528                 list_del(&tx->tx_zc_list);
1529                 list_add(&tx->tx_zc_list, &zlist);
1530         }
1531
1532         spin_unlock(&peer->ksnp_lock);
1533
1534         while (!list_empty(&zlist)) {
1535                 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
1536
1537                 list_del(&tx->tx_zc_list);
1538                 ksocknal_tx_decref(tx);
1539         }
1540 }
1541
1542 void
1543 ksocknal_terminate_conn(ksock_conn_t *conn)
1544 {
1545         /* This gets called by the reaper (guaranteed thread context) to
1546          * disengage the socket from its callbacks and close it.
1547          * ksnc_refcount will eventually hit zero, and then the reaper will
1548          * destroy it. */
1549         ksock_peer_t     *peer = conn->ksnc_peer;
1550         ksock_sched_t    *sched = conn->ksnc_scheduler;
1551         int            failed = 0;
1552
1553         LASSERT(conn->ksnc_closing);
1554
1555         /* wake up the scheduler to "send" all remaining packets to /dev/null */
1556         spin_lock_bh(&sched->kss_lock);
1557
1558         /* a closing conn is always ready to tx */
1559         conn->ksnc_tx_ready = 1;
1560
1561         if (!conn->ksnc_tx_scheduled &&
1562             !list_empty(&conn->ksnc_tx_queue)) {
1563                 list_add_tail(&conn->ksnc_tx_list,
1564                                &sched->kss_tx_conns);
1565                 conn->ksnc_tx_scheduled = 1;
1566                 /* extra ref for scheduler */
1567                 ksocknal_conn_addref(conn);
1568
1569                 wake_up(&sched->kss_waitq);
1570         }
1571
1572         spin_unlock_bh(&sched->kss_lock);
1573
1574         /* serialise with callbacks */
1575         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1576
1577         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
1578
1579         /* OK, so this conn may not be completely disengaged from its
1580          * scheduler yet, but it _has_ committed to terminate... */
1581         conn->ksnc_scheduler->kss_nconns--;
1582
1583         if (peer->ksnp_error != 0) {
1584                 /* peer's last conn closed in error */
1585                 LASSERT(list_empty(&peer->ksnp_conns));
1586                 failed = 1;
1587                 peer->ksnp_error = 0;     /* avoid multiple notifications */
1588         }
1589
1590         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1591
1592         if (failed)
1593                 ksocknal_peer_failed(peer);
1594
1595         /* The socket is closed on the final put; either here, or in
1596          * ksocknal_{send,recv}msg().  Since we set up the linger2 option
1597          * when the connection was established, this will close the socket
1598          * immediately, aborting anything buffered in it. Any hung
1599          * zero-copy transmits will therefore complete in finite time. */
1600         ksocknal_connsock_decref(conn);
1601 }
1602
1603 void
1604 ksocknal_queue_zombie_conn(ksock_conn_t *conn)
1605 {
1606         /* Queue the conn for the reaper to destroy */
1607
1608         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1609         spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
1610
1611         list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
1612         wake_up(&ksocknal_data.ksnd_reaper_waitq);
1613
1614         spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
1615 }
1616
1617 void
1618 ksocknal_destroy_conn(ksock_conn_t *conn)
1619 {
1620         unsigned long      last_rcv;
1621
1622         /* Final coup-de-grace of the reaper */
1623         CDEBUG(D_NET, "connection %p\n", conn);
1624
1625         LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
1626         LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
1627         LASSERT(conn->ksnc_sock == NULL);
1628         LASSERT(conn->ksnc_route == NULL);
1629         LASSERT(!conn->ksnc_tx_scheduled);
1630         LASSERT(!conn->ksnc_rx_scheduled);
1631         LASSERT(list_empty(&conn->ksnc_tx_queue));
1632
1633         /* complete current receive if any */
1634         switch (conn->ksnc_rx_state) {
1635         case SOCKNAL_RX_LNET_PAYLOAD:
1636                 last_rcv = conn->ksnc_rx_deadline -
1637                            cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
1638                 CERROR("Completing partial receive from %s[%d], ip %pI4h:%d, with error, wanted: %d, left: %d, last alive is %ld secs ago\n",
1639                        libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
1640                        &conn->ksnc_ipaddr, conn->ksnc_port,
1641                        conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
1642                        cfs_duration_sec(cfs_time_sub(cfs_time_current(),
1643                                                      last_rcv)));
1644                 lnet_finalize(conn->ksnc_peer->ksnp_ni,
1645                                conn->ksnc_cookie, -EIO);
1646                 break;
1647         case SOCKNAL_RX_LNET_HEADER:
1648                 if (conn->ksnc_rx_started)
1649                         CERROR("Incomplete receive of lnet header from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1650                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1651                                &conn->ksnc_ipaddr, conn->ksnc_port,
1652                                conn->ksnc_proto->pro_version);
1653                 break;
1654         case SOCKNAL_RX_KSM_HEADER:
1655                 if (conn->ksnc_rx_started)
1656                         CERROR("Incomplete receive of ksock message from %s, ip %pI4h:%d, with error, protocol: %d.x.\n",
1657                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1658                                &conn->ksnc_ipaddr, conn->ksnc_port,
1659                                conn->ksnc_proto->pro_version);
1660                 break;
1661         case SOCKNAL_RX_SLOP:
1662                 if (conn->ksnc_rx_started)
1663                         CERROR("Incomplete receive of slops from %s, ip %pI4h:%d, with error\n",
1664                                libcfs_id2str(conn->ksnc_peer->ksnp_id),
1665                                &conn->ksnc_ipaddr, conn->ksnc_port);
1666                 break;
1667         default:
1668                 LBUG();
1669                 break;
1670         }
1671
1672         ksocknal_peer_decref(conn->ksnc_peer);
1673
1674         LIBCFS_FREE(conn, sizeof(*conn));
1675 }
1676
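/* Close every conn this peer has to 'ipaddr' (0 matches any address) and
 * return how many were queued for the reaper.  Caller holds
 * ksnd_global_lock in write mode. */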
1677 int
1678 ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
1679 {
1680         ksock_conn_t       *conn;
1681         struct list_head         *ctmp;
1682         struct list_head         *cnxt;
1683         int              count = 0;
1684
1685         list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
1686                 conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
1687
1688                 if (ipaddr == 0 ||
1689                     conn->ksnc_ipaddr == ipaddr) {
1690                         count++;
1691                         ksocknal_close_conn_locked(conn, why);
1692                 }
1693         }
1694
1695         return count;
1696 }
1697
1698 int
1699 ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
1700 {
1701         ksock_peer_t     *peer = conn->ksnc_peer;
1702         __u32        ipaddr = conn->ksnc_ipaddr;
1703         int            count;
1704
1705         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1706
1707         count = ksocknal_close_peer_conns_locked(peer, ipaddr, why);
1708
1709         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1710
1711         return count;
1712 }
1713
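/* Close all conns matching 'id' and 'ipaddr' (either may be a wildcard).
 * Returns 0 on success or for wildcard requests, -ENOENT if a fully
 * specified target matched nothing. */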
1714 int
1715 ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
1716 {
1717         ksock_peer_t       *peer;
1718         struct list_head         *ptmp;
1719         struct list_head         *pnxt;
1720         int              lo;
1721         int              hi;
1722         int              i;
1723         int              count = 0;
1724
1725         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1726
1727         if (id.nid != LNET_NID_ANY)
1728                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
1729         else {
1730                 lo = 0;
1731                 hi = ksocknal_data.ksnd_peer_hash_size - 1;
1732         }
1733
1734         for (i = lo; i <= hi; i++) {
1735                 list_for_each_safe(ptmp, pnxt,
1736                                         &ksocknal_data.ksnd_peers[i]) {
1737
1738                         peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
1739
1740                         if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
1741                               (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
1742                                 continue;
1743
1744                         count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
1745                 }
1746         }
1747
1748         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1749
1750         /* wildcards always succeed */
1751         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
1752                 return 0;
1753
1754         if (count == 0)
1755                 return -ENOENT;
1756         else
1757                 return 0;
1758 }
1759
1760 void
1761 ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
1762 {
1763         /* The router is telling me it has been notified of a change in
1764          * gateway state... */
1765         lnet_process_id_t  id = {0};
1766
1767         id.nid = gw_nid;
1768         id.pid = LNET_PID_ANY;
1769
1770         CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
1771                 alive ? "up" : "down");
1772
1773         if (!alive) {
1774                 /* If the gateway crashed, close all open connections... */
1775                 ksocknal_close_matching_conns(id, 0);
1776                 return;
1777         }
1778
1779         /* ...otherwise do nothing.  We can only establish new connections
1780          * if we have autoroutes, and these connect on demand. */
1781 }
1782
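/* lnd_query handler: report in *when the last time 'nid' was known alive
 * (if known).  Newly ACKed socket data refreshes ksnp_last_alive here; if
 * the peer still has a connectable route (or does not exist yet), (re)create
 * it at the acceptor port and launch connection attempts. */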
1783 void
1784 ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
1785 {
1786         int             connect = 1;
1787         unsigned long    last_alive = 0;
1788         unsigned long    now = cfs_time_current();
1789         ksock_peer_t      *peer = NULL;
1790         rwlock_t                *glock = &ksocknal_data.ksnd_global_lock;
1791         lnet_process_id_t  id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
1792
1793         read_lock(glock);
1794
1795         peer = ksocknal_find_peer_locked(ni, id);
1796         if (peer != NULL) {
1797                 struct list_head       *tmp;
1798                 ksock_conn_t     *conn;
1799                 int            bufnob;
1800
1801                 list_for_each(tmp, &peer->ksnp_conns) {
1802                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
1803                         bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
1804
1805                         if (bufnob < conn->ksnc_tx_bufnob) {
1806                                 /* something got ACKed */
1807                                 conn->ksnc_tx_deadline =
1808                                         cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
1809                                 peer->ksnp_last_alive = now;
1810                                 conn->ksnc_tx_bufnob = bufnob;
1811                         }
1812                 }
1813
1814                 last_alive = peer->ksnp_last_alive;
1815                 if (ksocknal_find_connectable_route_locked(peer) == NULL)
1816                         connect = 0;
1817         }
1818
1819         read_unlock(glock);
1820
1821         if (last_alive != 0)
1822                 *when = last_alive;
1823
1824         CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
1825                libcfs_nid2str(nid), peer,
1826                last_alive ? cfs_duration_sec(now - last_alive) : -1,
1827                connect);
1828
1829         if (!connect)
1830                 return;
1831
1832         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
1833
1834         write_lock_bh(glock);
1835
1836         peer = ksocknal_find_peer_locked(ni, id);
1837         if (peer != NULL)
1838                 ksocknal_launch_all_connections_locked(peer);
1839
1840         write_unlock_bh(glock);
1842 }
1843
1844 static void
1845 ksocknal_push_peer(ksock_peer_t *peer)
1846 {
1847         int            index;
1848         int            i;
1849         struct list_head       *tmp;
1850         ksock_conn_t     *conn;
1851
1852         for (index = 0; ; index++) {
1853                 read_lock(&ksocknal_data.ksnd_global_lock);
1854
1855                 i = 0;
1856                 conn = NULL;
1857
1858                 list_for_each(tmp, &peer->ksnp_conns) {
1859                         if (i++ == index) {
1860                                 conn = list_entry(tmp, ksock_conn_t,
1861                                                        ksnc_list);
1862                                 ksocknal_conn_addref(conn);
1863                                 break;
1864                         }
1865                 }
1866
1867                 read_unlock(&ksocknal_data.ksnd_global_lock);
1868
1869                 if (conn == NULL)
1870                         break;
1871
1872                 ksocknal_lib_push_conn(conn);
1873                 ksocknal_conn_decref(conn);
1874         }
1875 }
1876
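/* ioctl helper: "push" (flush) every connection of every peer matching 'id'.
 * Peers are revisited by index so the global lock can be dropped while each
 * one is pushed; returns -ENOENT if nothing matched. */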
1877 static int
1878 ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
1879 {
1880         ksock_peer_t      *peer;
1881         struct list_head        *tmp;
1882         int             index;
1883         int             i;
1884         int             j;
1885         int             rc = -ENOENT;
1886
1887         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1888                 for (j = 0; ; j++) {
1889                         read_lock(&ksocknal_data.ksnd_global_lock);
1890
1891                         index = 0;
1892                         peer = NULL;
1893
1894                         list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
1895                                 peer = list_entry(tmp, ksock_peer_t,
1896                                                       ksnp_list);
1897
1898                                 if (!((id.nid == LNET_NID_ANY ||
1899                                        id.nid == peer->ksnp_id.nid) &&
1900                                       (id.pid == LNET_PID_ANY ||
1901                                        id.pid == peer->ksnp_id.pid))) {
1902                                         peer = NULL;
1903                                         continue;
1904                                 }
1905
1906                                 if (index++ == j) {
1907                                         ksocknal_peer_addref(peer);
1908                                         break;
1909                                 }
1910                         }
1911
1912                         read_unlock(&ksocknal_data.ksnd_global_lock);
1913
1914                         if (peer == NULL) /* no peer left at index j */
1915                                 break;
1916                         rc = 0;
1917                         ksocknal_push_peer(peer);
1918                         ksocknal_peer_decref(peer);
1919                 }
1920
1921         }
1922
1923         return rc;
1924 }
1925
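/* Register an additional local interface (IP + netmask) on this net.
 * Duplicates are silently accepted; -ENOSPC is returned once
 * LNET_MAX_INTERFACES are configured.  Existing peers and routes already
 * using the address are counted, but only new connections will use it. */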
1926 static int
1927 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
1928 {
1929         ksock_net_t       *net = ni->ni_data;
1930         ksock_interface_t *iface;
1931         int             rc;
1932         int             i;
1933         int             j;
1934         struct list_head        *ptmp;
1935         ksock_peer_t      *peer;
1936         struct list_head        *rtmp;
1937         ksock_route_t     *route;
1938
1939         if (ipaddress == 0 ||
1940             netmask == 0)
1941                 return -EINVAL;
1942
1943         write_lock_bh(&ksocknal_data.ksnd_global_lock);
1944
1945         iface = ksocknal_ip2iface(ni, ipaddress);
1946         if (iface != NULL) {
1947                 /* silently ignore dups */
1948                 rc = 0;
1949         } else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
1950                 rc = -ENOSPC;
1951         } else {
1952                 iface = &net->ksnn_interfaces[net->ksnn_ninterfaces++];
1953
1954                 iface->ksni_ipaddr = ipaddress;
1955                 iface->ksni_netmask = netmask;
1956                 iface->ksni_nroutes = 0;
1957                 iface->ksni_npeers = 0;
1958
1959                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
1960                         list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
1961                                 peer = list_entry(ptmp, ksock_peer_t,
1962                                                       ksnp_list);
1963
1964                                 for (j = 0; j < peer->ksnp_n_passive_ips; j++)
1965                                         if (peer->ksnp_passive_ips[j] == ipaddress)
1966                                                 iface->ksni_npeers++;
1967
1968                                 list_for_each(rtmp, &peer->ksnp_routes) {
1969                                         route = list_entry(rtmp,
1970                                                                ksock_route_t,
1971                                                                ksnr_list);
1972
1973                                         if (route->ksnr_myipaddr == ipaddress)
1974                                                 iface->ksni_nroutes++;
1975                                 }
1976                         }
1977                 }
1978
1979                 rc = 0;
1980                 /* NB only new connections will pay attention to the new interface! */
1981         }
1982
1983         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1984
1985         return rc;
1986 }
1987
1988 static void
1989 ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
1990 {
1991         struct list_head         *tmp;
1992         struct list_head         *nxt;
1993         ksock_route_t      *route;
1994         ksock_conn_t       *conn;
1995         int              i;
1996         int              j;
1997
1998         for (i = 0; i < peer->ksnp_n_passive_ips; i++)
1999                 if (peer->ksnp_passive_ips[i] == ipaddr) {
2000                         for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
2001                                 peer->ksnp_passive_ips[j-1] =
2002                                         peer->ksnp_passive_ips[j];
2003                         peer->ksnp_n_passive_ips--;
2004                         break;
2005                 }
2006
2007         list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
2008                 route = list_entry(tmp, ksock_route_t, ksnr_list);
2009
2010                 if (route->ksnr_myipaddr != ipaddr)
2011                         continue;
2012
2013                 if (route->ksnr_share_count != 0) {
2014                         /* Manually created; keep, but unbind */
2015                         route->ksnr_myipaddr = 0;
2016                 } else {
2017                         ksocknal_del_route_locked(route);
2018                 }
2019         }
2020
2021         list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
2022                 conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2023
2024                 if (conn->ksnc_myipaddr == ipaddr)
2025                         ksocknal_close_conn_locked(conn, 0);
2026         }
2027 }
2028
2029 static int
2030 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
2031 {
2032         ksock_net_t       *net = ni->ni_data;
2033         int             rc = -ENOENT;
2034         struct list_head        *tmp;
2035         struct list_head        *nxt;
2036         ksock_peer_t      *peer;
2037         __u32         this_ip;
2038         int             i;
2039         int             j;
2040
2041         write_lock_bh(&ksocknal_data.ksnd_global_lock);
2042
2043         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2044                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
2045
2046                 if (!(ipaddress == 0 ||
2047                       ipaddress == this_ip))
2048                         continue;
2049
2050                 rc = 0;
2051
2052                 for (j = i+1; j < net->ksnn_ninterfaces; j++)
2053                         net->ksnn_interfaces[j-1] =
2054                                 net->ksnn_interfaces[j];
2055
2056                 net->ksnn_ninterfaces--;
2057
2058                 for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
2059                         list_for_each_safe(tmp, nxt,
2060                                                &ksocknal_data.ksnd_peers[j]) {
2061                                 peer = list_entry(tmp, ksock_peer_t,
2062                                                       ksnp_list);
2063
2064                                 if (peer->ksnp_ni != ni)
2065                                         continue;
2066
2067                                 ksocknal_peer_del_interface_locked(peer, this_ip);
2068                         }
2069                 }
2070         }
2071
2072         write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2073
2074         return rc;
2075 }
2076
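/* lnd_ctl handler: dispatch IOC_LIBCFS_* ioctls (typically issued through
 * user-space tools such as lctl).  Arguments and results are marshalled in
 * the struct libcfs_ioctl_data fields, e.g. IOC_LIBCFS_ADD_PEER expects the
 * peer NID in ioc_nid and its IP/port in ioc_u32[0]/ioc_u32[1]. */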
2077 int
2078 ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
2079 {
2080         lnet_process_id_t id = {0};
2081         struct libcfs_ioctl_data *data = arg;
2082         int rc;
2083
2084         switch (cmd) {
2085         case IOC_LIBCFS_GET_INTERFACE: {
2086                 ksock_net_t       *net = ni->ni_data;
2087                 ksock_interface_t *iface;
2088
2089                 read_lock(&ksocknal_data.ksnd_global_lock);
2090
2091                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
2092                         rc = -ENOENT;
2093                 } else {
2094                         rc = 0;
2095                         iface = &net->ksnn_interfaces[data->ioc_count];
2096
2097                         data->ioc_u32[0] = iface->ksni_ipaddr;
2098                         data->ioc_u32[1] = iface->ksni_netmask;
2099                         data->ioc_u32[2] = iface->ksni_npeers;
2100                         data->ioc_u32[3] = iface->ksni_nroutes;
2101                 }
2102
2103                 read_unlock(&ksocknal_data.ksnd_global_lock);
2104                 return rc;
2105         }
2106
2107         case IOC_LIBCFS_ADD_INTERFACE:
2108                 return ksocknal_add_interface(ni,
2109                                               data->ioc_u32[0], /* IP address */
2110                                               data->ioc_u32[1]); /* net mask */
2111
2112         case IOC_LIBCFS_DEL_INTERFACE:
2113                 return ksocknal_del_interface(ni,
2114                                               data->ioc_u32[0]); /* IP address */
2115
2116         case IOC_LIBCFS_GET_PEER: {
2117                 __u32       myip = 0;
2118                 __u32       ip = 0;
2119                 int           port = 0;
2120                 int           conn_count = 0;
2121                 int           share_count = 0;
2122
2123                 rc = ksocknal_get_peer_info(ni, data->ioc_count,
2124                                             &id, &myip, &ip, &port,
2125                                             &conn_count,  &share_count);
2126                 if (rc != 0)
2127                         return rc;
2128
2129                 data->ioc_nid    = id.nid;
2130                 data->ioc_count  = share_count;
2131                 data->ioc_u32[0] = ip;
2132                 data->ioc_u32[1] = port;
2133                 data->ioc_u32[2] = myip;
2134                 data->ioc_u32[3] = conn_count;
2135                 data->ioc_u32[4] = id.pid;
2136                 return 0;
2137         }
2138
2139         case IOC_LIBCFS_ADD_PEER:
2140                 id.nid = data->ioc_nid;
2141                 id.pid = LUSTRE_SRV_LNET_PID;
2142                 return ksocknal_add_peer(ni, id,
2143                                           data->ioc_u32[0], /* IP */
2144                                           data->ioc_u32[1]); /* port */
2145
2146         case IOC_LIBCFS_DEL_PEER:
2147                 id.nid = data->ioc_nid;
2148                 id.pid = LNET_PID_ANY;
2149                 return ksocknal_del_peer(ni, id,
2150                                           data->ioc_u32[0]); /* IP */
2151
2152         case IOC_LIBCFS_GET_CONN: {
2153                 int        txmem;
2154                 int        rxmem;
2155                 int        nagle;
2156                 ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
2157
2158                 if (conn == NULL)
2159                         return -ENOENT;
2160
2161                 ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
2162
2163                 data->ioc_count  = txmem;
2164                 data->ioc_nid    = conn->ksnc_peer->ksnp_id.nid;
2165                 data->ioc_flags  = nagle;
2166                 data->ioc_u32[0] = conn->ksnc_ipaddr;
2167                 data->ioc_u32[1] = conn->ksnc_port;
2168                 data->ioc_u32[2] = conn->ksnc_myipaddr;
2169                 data->ioc_u32[3] = conn->ksnc_type;
2170                 data->ioc_u32[4] = conn->ksnc_scheduler->kss_info->ksi_cpt;
2171                 data->ioc_u32[5] = rxmem;
2172                 data->ioc_u32[6] = conn->ksnc_peer->ksnp_id.pid;
2173                 ksocknal_conn_decref(conn);
2174                 return 0;
2175         }
2176
2177         case IOC_LIBCFS_CLOSE_CONNECTION:
2178                 id.nid = data->ioc_nid;
2179                 id.pid = LNET_PID_ANY;
2180                 return ksocknal_close_matching_conns(id,
2181                                                       data->ioc_u32[0]);
2182
2183         case IOC_LIBCFS_REGISTER_MYNID:
2184                 /* Ignore if this is a noop */
2185                 if (data->ioc_nid == ni->ni_nid)
2186                         return 0;
2187
2188                 CERROR("obsolete IOC_LIBCFS_REGISTER_MYNID: %s(%s)\n",
2189                        libcfs_nid2str(data->ioc_nid),
2190                        libcfs_nid2str(ni->ni_nid));
2191                 return -EINVAL;
2192
2193         case IOC_LIBCFS_PUSH_CONNECTION:
2194                 id.nid = data->ioc_nid;
2195                 id.pid = LNET_PID_ANY;
2196                 return ksocknal_push(ni, id);
2197
2198         default:
2199                 return -EINVAL;
2200         }
2201         /* not reached */
2202 }
2203
2204 static void
2205 ksocknal_free_buffers(void)
2206 {
2207         LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
2208
2209         if (ksocknal_data.ksnd_sched_info != NULL) {
2210                 struct ksock_sched_info *info;
2211                 int                     i;
2212
2213                 cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2214                         if (info->ksi_scheds != NULL) {
2215                                 LIBCFS_FREE(info->ksi_scheds,
2216                                             info->ksi_nthreads_max *
2217                                             sizeof(info->ksi_scheds[0]));
2218                         }
2219                 }
2220                 cfs_percpt_free(ksocknal_data.ksnd_sched_info);
2221         }
2222
2223         LIBCFS_FREE(ksocknal_data.ksnd_peers,
2224                      sizeof(struct list_head) *
2225                      ksocknal_data.ksnd_peer_hash_size);
2226
2227         spin_lock(&ksocknal_data.ksnd_tx_lock);
2228
2229         if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
2230                 struct list_head        zlist;
2231                 ksock_tx_t      *tx;
2232
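                /* Splice the whole idle-noop TX list onto the local zlist:
                 * list_add() links zlist in right after the list head, and
                 * list_del_init() then removes the head, leaving every TX
                 * chained from zlist. */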
2233                 list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
2234                 list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
2235                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2236
2237                 while (!list_empty(&zlist)) {
2238                         tx = list_entry(zlist.next, ksock_tx_t, tx_list);
2239                         list_del(&tx->tx_list);
2240                         LIBCFS_FREE(tx, tx->tx_desc_size);
2241                 }
2242         } else {
2243                 spin_unlock(&ksocknal_data.ksnd_tx_lock);
2244         }
2245 }
2246
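/* Tear down the module-global state: check that every peer, conn and connd
 * queue is already empty, flag ksnd_shuttingdown, wake the connd, reaper and
 * scheduler threads, wait for ksnd_nthreads to drop to zero, then free the
 * remaining buffers. */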
2247 static void
2248 ksocknal_base_shutdown(void)
2249 {
2250         struct ksock_sched_info *info;
2251         ksock_sched_t           *sched;
2252         int                     i;
2253         int                     j;
2254
2255         CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
2256                atomic_read(&libcfs_kmemory));
2257         LASSERT(ksocknal_data.ksnd_nnets == 0);
2258
2259         switch (ksocknal_data.ksnd_init) {
2260         default:
2261                 LASSERT(0);
2262
2263         case SOCKNAL_INIT_ALL:
2264         case SOCKNAL_INIT_DATA:
2265                 LASSERT(ksocknal_data.ksnd_peers != NULL);
2266                 for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2267                         LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
2268                 }
2269
2270                 LASSERT(list_empty(&ksocknal_data.ksnd_nets));
2271                 LASSERT(list_empty(&ksocknal_data.ksnd_enomem_conns));
2272                 LASSERT(list_empty(&ksocknal_data.ksnd_zombie_conns));
2273                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
2274                 LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
2275
2276                 if (ksocknal_data.ksnd_sched_info != NULL) {
2277                         cfs_percpt_for_each(info, i,
2278                                             ksocknal_data.ksnd_sched_info) {
2279                                 if (info->ksi_scheds == NULL)
2280                                         continue;
2281
2282                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2283
2284                                         sched = &info->ksi_scheds[j];
2285                                         LASSERT(list_empty(
2286                                                 &sched->kss_tx_conns));
2287                                         LASSERT(list_empty(
2288                                                 &sched->kss_rx_conns));
2289                                         LASSERT(list_empty(
2290                                                 &sched->kss_zombie_noop_txs));
2291                                         LASSERT(sched->kss_nconns == 0);
2292                                 }
2293                         }
2294                 }
2295
2296                 /* flag threads to terminate; wake and wait for them to die */
2297                 ksocknal_data.ksnd_shuttingdown = 1;
2298                 wake_up_all(&ksocknal_data.ksnd_connd_waitq);
2299                 wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
2300
2301                 if (ksocknal_data.ksnd_sched_info != NULL) {
2302                         cfs_percpt_for_each(info, i,
2303                                             ksocknal_data.ksnd_sched_info) {
2304                                 if (info->ksi_scheds == NULL)
2305                                         continue;
2306
2307                                 for (j = 0; j < info->ksi_nthreads_max; j++) {
2308                                         sched = &info->ksi_scheds[j];
2309                                         wake_up_all(&sched->kss_waitq);
2310                                 }
2311                         }
2312                 }
2313
2314                 i = 4;
2315                 read_lock(&ksocknal_data.ksnd_global_lock);
2316                 while (ksocknal_data.ksnd_nthreads != 0) {
2317                         i++;
2318                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2319                                "waiting for %d threads to terminate\n",
2320                                 ksocknal_data.ksnd_nthreads);
2321                         read_unlock(&ksocknal_data.ksnd_global_lock);
2322                         set_current_state(TASK_UNINTERRUPTIBLE);
2323                         schedule_timeout(cfs_time_seconds(1));
2324                         read_lock(&ksocknal_data.ksnd_global_lock);
2325                 }
2326                 read_unlock(&ksocknal_data.ksnd_global_lock);
2327
2328                 ksocknal_free_buffers();
2329
2330                 ksocknal_data.ksnd_init = SOCKNAL_INIT_NOTHING;
2331                 break;
2332         }
2333
2334         CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
2335                atomic_read(&libcfs_kmemory));
2336
2337         module_put(THIS_MODULE);
2338 }
2339
2340 static __u64
2341 ksocknal_new_incarnation(void)
2342 {
2343
2344         /* The incarnation number is the time this module was loaded;
2345          * it identifies this particular instance of the socknal.
2346          */
2347         return ktime_get_ns();
2348 }
2349
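/* Bring up the module-global state: allocate the peer hash table and global
 * lists/locks, size the per-CPT scheduler pools (bounded by the nscheds
 * tunable, or roughly half the CPUs of each partition), then start the
 * connection daemons and the reaper.  Any failure unwinds through
 * ksocknal_base_shutdown(). */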
2350 static int
2351 ksocknal_base_startup(void)
2352 {
2353         struct ksock_sched_info *info;
2354         int                     rc;
2355         int                     i;
2356
2357         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
2358         LASSERT(ksocknal_data.ksnd_nnets == 0);
2359
2360         memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
2361
2362         ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
2363         LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
2364                       sizeof(struct list_head) *
2365                       ksocknal_data.ksnd_peer_hash_size);
2366         if (ksocknal_data.ksnd_peers == NULL)
2367                 return -ENOMEM;
2368
2369         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
2370                 INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
2371
2372         rwlock_init(&ksocknal_data.ksnd_global_lock);
2373         INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
2374
2375         spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
2376         INIT_LIST_HEAD(&ksocknal_data.ksnd_enomem_conns);
2377         INIT_LIST_HEAD(&ksocknal_data.ksnd_zombie_conns);
2378         INIT_LIST_HEAD(&ksocknal_data.ksnd_deathrow_conns);
2379         init_waitqueue_head(&ksocknal_data.ksnd_reaper_waitq);
2380
2381         spin_lock_init(&ksocknal_data.ksnd_connd_lock);
2382         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_connreqs);
2383         INIT_LIST_HEAD(&ksocknal_data.ksnd_connd_routes);
2384         init_waitqueue_head(&ksocknal_data.ksnd_connd_waitq);
2385
2386         spin_lock_init(&ksocknal_data.ksnd_tx_lock);
2387         INIT_LIST_HEAD(&ksocknal_data.ksnd_idle_noop_txs);
2388
2389         /* NB memset above zeros whole of ksocknal_data */
2390
2391         /* flag lists/ptrs/locks initialised */
2392         ksocknal_data.ksnd_init = SOCKNAL_INIT_DATA;
2393         try_module_get(THIS_MODULE);
2394
2395         ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
2396                                                          sizeof(*info));
2397         if (ksocknal_data.ksnd_sched_info == NULL)
2398                 goto failed;
2399
2400         cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
2401                 ksock_sched_t   *sched;
2402                 int             nthrs;
2403
2404                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2405                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2406                         nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
2407                 } else {
2408                         /* cap at half of the CPUs; assume the other half
2409                          * should be reserved for upper-layer modules */
2410                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2411                 }
2412
2413                 info->ksi_nthreads_max = nthrs;
2414                 info->ksi_cpt = i;
2415
2416                 LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
2417                                  info->ksi_nthreads_max * sizeof(*sched));
2418                 if (info->ksi_scheds == NULL)
2419                         goto failed;
2420
2421                 for (; nthrs > 0; nthrs--) {
2422                         sched = &info->ksi_scheds[nthrs - 1];
2423
2424                         sched->kss_info = info;
2425                         spin_lock_init(&sched->kss_lock);
2426                         INIT_LIST_HEAD(&sched->kss_rx_conns);
2427                         INIT_LIST_HEAD(&sched->kss_tx_conns);
2428                         INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
2429                         init_waitqueue_head(&sched->kss_waitq);
2430                 }
2431         }
2432
2433         ksocknal_data.ksnd_connd_starting       = 0;
2434         ksocknal_data.ksnd_connd_failed_stamp   = 0;
2435         ksocknal_data.ksnd_connd_starting_stamp = get_seconds();
2436         /* must have at least 2 connds to remain responsive to accepts while
2437          * connecting */
2438         if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
2439                 *ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
2440
2441         if (*ksocknal_tunables.ksnd_nconnds_max <
2442             *ksocknal_tunables.ksnd_nconnds) {
2443                 ksocknal_tunables.ksnd_nconnds_max =
2444                         ksocknal_tunables.ksnd_nconnds;
2445         }
2446
2447         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
2448                 char name[16];
2449                 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2450                 ksocknal_data.ksnd_connd_starting++;
2451                 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2452
2454                 snprintf(name, sizeof(name), "socknal_cd%02d", i);
2455                 rc = ksocknal_thread_start(ksocknal_connd,
2456                                            (void *)((ulong_ptr_t)i), name);
2457                 if (rc != 0) {
2458                         spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
2459                         ksocknal_data.ksnd_connd_starting--;
2460                         spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2461                         CERROR("Can't spawn socknal connd: %d\n", rc);
2462                         goto failed;
2463                 }
2464         }
2465
2466         rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
2467         if (rc != 0) {
2468                 CERROR("Can't spawn socknal reaper: %d\n", rc);
2469                 goto failed;
2470         }
2471
2472         /* flag everything initialised */
2473         ksocknal_data.ksnd_init = SOCKNAL_INIT_ALL;
2474
2475         return 0;
2476
2477  failed:
2478         ksocknal_base_shutdown();
2479         return -ENETDOWN;
2480 }
2481
2482 static void
2483 ksocknal_debug_peerhash(lnet_ni_t *ni)
2484 {
2485         ksock_peer_t    *peer = NULL;
2486         struct list_head        *tmp;
2487         int             i;
2488
2489         read_lock(&ksocknal_data.ksnd_global_lock);
2490
2491         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
2492                 list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
2493                         peer = list_entry(tmp, ksock_peer_t, ksnp_list);
2494
2495                         if (peer->ksnp_ni == ni)
2496                                 break;
2497
2498                         peer = NULL;
2499                 }
2500         }
2501
2502         if (peer != NULL) {
2503                 ksock_route_t *route;
2504                 ksock_conn_t  *conn;
2505
2506                 CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
2507                       libcfs_id2str(peer->ksnp_id),
2508                       atomic_read(&peer->ksnp_refcount),
2509                       peer->ksnp_sharecount, peer->ksnp_closing,
2510                       peer->ksnp_accepting, peer->ksnp_error,
2511                       peer->ksnp_zc_next_cookie,
2512                       !list_empty(&peer->ksnp_tx_queue),
2513                       !list_empty(&peer->ksnp_zc_req_list));
2514
2515                 list_for_each(tmp, &peer->ksnp_routes) {
2516                         route = list_entry(tmp, ksock_route_t, ksnr_list);
2517                         CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
2518                               atomic_read(&route->ksnr_refcount),
2519                               route->ksnr_scheduled, route->ksnr_connecting,
2520                               route->ksnr_connected, route->ksnr_deleted);
2521                 }
2522
2523                 list_for_each(tmp, &peer->ksnp_conns) {
2524                         conn = list_entry(tmp, ksock_conn_t, ksnc_list);
2525                         CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
2526                                atomic_read(&conn->ksnc_conn_refcount),
2527                                atomic_read(&conn->ksnc_sock_refcount),
2528                                conn->ksnc_type, conn->ksnc_closing);
2529                 }
2530         }
2531
2532         read_unlock(&ksocknal_data.ksnd_global_lock);
2534 }
2535
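/* lnd_shutdown handler for one NI: flag the net as shutting down, delete all
 * its peers and wait for them to disappear (dumping the peer hash once a
 * second while waiting), then free the net and tear down the global state
 * when the last net goes away. */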
2536 void
2537 ksocknal_shutdown(lnet_ni_t *ni)
2538 {
2539         ksock_net_t      *net = ni->ni_data;
2540         int            i;
2541         lnet_process_id_t anyid = {0};
2542
2543         anyid.nid =  LNET_NID_ANY;
2544         anyid.pid =  LNET_PID_ANY;
2545
2546         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
2547         LASSERT(ksocknal_data.ksnd_nnets > 0);
2548
2549         spin_lock_bh(&net->ksnn_lock);
2550         net->ksnn_shutdown = 1;          /* prevent new peers */
2551         spin_unlock_bh(&net->ksnn_lock);
2552
2553         /* Delete all peers */
2554         ksocknal_del_peer(ni, anyid, 0);
2555
2556         /* Wait for all peer state to clean up */
2557         i = 2;
2558         spin_lock_bh(&net->ksnn_lock);
2559         while (net->ksnn_npeers != 0) {
2560                 spin_unlock_bh(&net->ksnn_lock);
2561
2562                 i++;
2563                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
2564                        "waiting for %d peers to disconnect\n",
2565                        net->ksnn_npeers);
2566                 set_current_state(TASK_UNINTERRUPTIBLE);
2567                 schedule_timeout(cfs_time_seconds(1));
2568
2569                 ksocknal_debug_peerhash(ni);
2570
2571                 spin_lock_bh(&net->ksnn_lock);
2572         }
2573         spin_unlock_bh(&net->ksnn_lock);
2574
2575         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2576                 LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
2577                 LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
2578         }
2579
2580         list_del(&net->ksnn_list);
2581         LIBCFS_FREE(net, sizeof(*net));
2582
2583         ksocknal_data.ksnd_nnets--;
2584         if (ksocknal_data.ksnd_nnets == 0)
2585                 ksocknal_base_shutdown();
2586 }
2587
2588 static int
2589 ksocknal_enumerate_interfaces(ksock_net_t *net)
2590 {
2591         char      **names;
2592         int      i;
2593         int      j;
2594         int      rc;
2595         int      n;
2596
2597         n = libcfs_ipif_enumerate(&names);
2598         if (n <= 0) {
2599                 CERROR("Can't enumerate interfaces: %d\n", n);
2600                 return n;
2601         }
2602
2603         for (i = j = 0; i < n; i++) {
2604                 int     up;
2605                 __u32      ip;
2606                 __u32      mask;
2607
2608                 if (!strcmp(names[i], "lo")) /* skip the loopback IF */
2609                         continue;
2610
2611                 rc = libcfs_ipif_query(names[i], &up, &ip, &mask);
2612                 if (rc != 0) {
2613                         CWARN("Can't get interface %s info: %d\n",
2614                               names[i], rc);
2615                         continue;
2616                 }
2617
2618                 if (!up) {
2619                         CWARN("Ignoring interface %s (down)\n",
2620                               names[i]);
2621                         continue;
2622                 }
2623
2624                 if (j == LNET_MAX_INTERFACES) {
2625                         CWARN("Ignoring interface %s (too many interfaces)\n",
2626                               names[i]);
2627                         continue;
2628                 }
2629
2630                 net->ksnn_interfaces[j].ksni_ipaddr = ip;
2631                 net->ksnn_interfaces[j].ksni_netmask = mask;
2632                 strncpy(&net->ksnn_interfaces[j].ksni_name[0],
2633                         names[i], IFNAMSIZ);
2634                 j++;
2635         }
2636
2637         libcfs_ipif_free_enumeration(names, n);
2638
2639         if (j == 0)
2640                 CERROR("Can't find any usable interfaces\n");
2641
2642         return j;
2643 }
2644
2645 static int
2646 ksocknal_search_new_ipif(ksock_net_t *net)
2647 {
2648         int     new_ipif = 0;
2649         int     i;
2650
2651         for (i = 0; i < net->ksnn_ninterfaces; i++) {
2652                 char            *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
2653                 char            *colon = strchr(ifnam, ':');
2654                 int             found  = 0;
2655                 ksock_net_t     *tmp;
2656                 int             j;
2657
2658                 if (colon != NULL) /* ignore alias device */
2659                         *colon = 0;
2660
2661                 list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
2662                                         ksnn_list) {
2663                         for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
2664                                 char *ifnam2 =
2665                                         &tmp->ksnn_interfaces[j].ksni_name[0];
2666                                 char *colon2 = strchr(ifnam2, ':');
2667
2668                                 if (colon2 != NULL)
2669                                         *colon2 = 0;
2670
2671                                 found = strcmp(ifnam, ifnam2) == 0;
2672                                 if (colon2 != NULL)
2673                                         *colon2 = ':';
2674                         }
2675                         if (found)
2676                                 break;
2677                 }
2678
2679                 new_ipif += !found;
2680                 if (colon != NULL)
2681                         *colon = ':';
2682         }
2683
2684         return new_ipif;
2685 }
2686
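/* Start scheduler threads for one CPT: the full complement on first use, or
 * at most two extra threads when a new interface shows up later.  Returns 0,
 * or the error from the first thread that failed to start. */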
2687 static int
2688 ksocknal_start_schedulers(struct ksock_sched_info *info)
2689 {
2690         int     nthrs;
2691         int     rc = 0;
2692         int     i;
2693
2694         if (info->ksi_nthreads == 0) {
2695                 if (*ksocknal_tunables.ksnd_nscheds > 0) {
2696                         nthrs = info->ksi_nthreads_max;
2697                 } else {
2698                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2699                                                info->ksi_cpt);
2700                         nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
2701                         nthrs = min(SOCKNAL_NSCHEDS_HIGH, nthrs);
2702                 }
2703                 nthrs = min(nthrs, info->ksi_nthreads_max);
2704         } else {
2705                 LASSERT(info->ksi_nthreads <= info->ksi_nthreads_max);
2706                 /* increase two threads if there is new interface */
2707                 nthrs = min(2, info->ksi_nthreads_max - info->ksi_nthreads);
2708         }
2709
2710         for (i = 0; i < nthrs; i++) {
2711                 long            id;
2712                 char            name[20];
2713                 ksock_sched_t   *sched;
2714                 id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
2715                 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
2716                 snprintf(name, sizeof(name), "socknal_sd%02d_%02d",
2717                          info->ksi_cpt, (int)(sched - &info->ksi_scheds[0]));
2718
2719                 rc = ksocknal_thread_start(ksocknal_scheduler,
2720                                            (void *)id, name);
2721                 if (rc == 0)
2722                         continue;
2723
2724                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2725                        info->ksi_cpt, info->ksi_nthreads + i, rc);
2726                 break;
2727         }
2728
2729         info->ksi_nthreads += i;
2730         return rc;
2731 }
2732
2733 static int
2734 ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
2735 {
2736         int     newif = ksocknal_search_new_ipif(net);
2737         int     rc;
2738         int     i;
2739
2740         LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table()));
2741
2742         for (i = 0; i < ncpts; i++) {
2743                 struct ksock_sched_info *info;
2744                 int cpt = (cpts == NULL) ? i : cpts[i];
2745
2746                 LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
2747                 info = ksocknal_data.ksnd_sched_info[cpt];
2748
2749                 if (!newif && info->ksi_nthreads > 0)
2750                         continue;
2751
2752                 rc = ksocknal_start_schedulers(info);
2753                 if (rc != 0)
2754                         return rc;
2755         }
2756         return 0;
2757 }
2758
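/* lnd_startup handler: initialise the global state if this is the first NI,
 * allocate the per-net data, take interface addresses either from the
 * kernel's interface list or from ni_interfaces[], start scheduler threads
 * for the NI's CPTs, derive ni_nid from the first interface's address and
 * link the net into ksnd_nets. */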
2759 int
2760 ksocknal_startup(lnet_ni_t *ni)
2761 {
2762         ksock_net_t  *net;
2763         int        rc;
2764         int        i;
2765
2766         LASSERT(ni->ni_lnd == &the_ksocklnd);
2767
2768         if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
2769                 rc = ksocknal_base_startup();
2770                 if (rc != 0)
2771                         return rc;
2772         }
2773
2774         LIBCFS_ALLOC(net, sizeof(*net));
2775         if (net == NULL)
2776                 goto fail_0;
2777
2778         spin_lock_init(&net->ksnn_lock);
2779         net->ksnn_incarnation = ksocknal_new_incarnation();
2780         ni->ni_data = net;
2781         ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
2782         ni->ni_maxtxcredits   = *ksocknal_tunables.ksnd_credits;
2783         ni->ni_peertxcredits  = *ksocknal_tunables.ksnd_peertxcredits;
2784         ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
2785
2786         if (ni->ni_interfaces[0] == NULL) {
2787                 rc = ksocknal_enumerate_interfaces(net);
2788                 if (rc <= 0)
2789                         goto fail_1;
2790
2791                 net->ksnn_ninterfaces = 1;
2792         } else {
2793                 for (i = 0; i < LNET_MAX_INTERFACES; i++) {
2794                         int    up;
2795
2796                         if (ni->ni_interfaces[i] == NULL)
2797                                 break;
2798
2799                         rc = libcfs_ipif_query(
2800                                 ni->ni_interfaces[i], &up,
2801                                 &net->ksnn_interfaces[i].ksni_ipaddr,
2802                                 &net->ksnn_interfaces[i].ksni_netmask);
2803
2804                         if (rc != 0) {
2805                                 CERROR("Can't get interface %s info: %d\n",
2806                                        ni->ni_interfaces[i], rc);
2807                                 goto fail_1;
2808                         }
2809
2810                         if (!up) {
2811                                 CERROR("Interface %s is down\n",
2812                                        ni->ni_interfaces[i]);
2813                                 goto fail_1;
2814                         }
2815
2816                         strncpy(&net->ksnn_interfaces[i].ksni_name[0],
2817                                 ni->ni_interfaces[i], IFNAMSIZ);
2818                 }
2819                 net->ksnn_ninterfaces = i;
2820         }
2821
2822         /* call it before add it to ksocknal_data.ksnd_nets */
2823         rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
2824         if (rc != 0)
2825                 goto fail_1;
2826
2827         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
2828                                 net->ksnn_interfaces[0].ksni_ipaddr);
2829         list_add(&net->ksnn_list, &ksocknal_data.ksnd_nets);
2830
2831         ksocknal_data.ksnd_nnets++;
2832
2833         return 0;
2834
2835  fail_1:
2836         LIBCFS_FREE(net, sizeof(*net));
2837  fail_0:
2838         if (ksocknal_data.ksnd_nnets == 0)
2839                 ksocknal_base_shutdown();
2840
2841         return -ENETDOWN;
2842 }
2843
2845 static void __exit
2846 ksocknal_module_fini(void)
2847 {
2848         lnet_unregister_lnd(&the_ksocklnd);
2849 }
2850
2851 static int __init
2852 ksocknal_module_init(void)
2853 {
2854         int    rc;
2855
2856         /* check ksnr_connected/connecting field large enough */
2857         CLASSERT(SOCKLND_CONN_NTYPES <= 4);
2858         CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
2859
2860         /* initialize the_ksocklnd */
2861         the_ksocklnd.lnd_type     = SOCKLND;
2862         the_ksocklnd.lnd_startup  = ksocknal_startup;
2863         the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
2864         the_ksocklnd.lnd_ctl      = ksocknal_ctl;
2865         the_ksocklnd.lnd_send     = ksocknal_send;
2866         the_ksocklnd.lnd_recv     = ksocknal_recv;
2867         the_ksocklnd.lnd_notify   = ksocknal_notify;
2868         the_ksocklnd.lnd_query    = ksocknal_query;
2869         the_ksocklnd.lnd_accept   = ksocknal_accept;
2870
2871         rc = ksocknal_tunables_init();
2872         if (rc != 0)
2873                 return rc;
2874
2875         lnet_register_lnd(&the_ksocklnd);
2876
2877         return 0;
2878 }
2879
2880 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2881 MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
2882 MODULE_LICENSE("GPL");
2883 MODULE_VERSION("3.0.0");
2884
2885 module_init(ksocknal_module_init);
2886 module_exit(ksocknal_module_fini);