Upgrade to 4.4.50-rt62
[kvmfornfv.git] / kernel / drivers / infiniband / core / multicast.c
/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
        .name   = "ib_multicast",
        .add    = mcast_add_one,
        .remove = mcast_remove_one
};

static struct ib_sa_client      sa_client;
static struct workqueue_struct  *mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
        struct mcast_device     *dev;
        spinlock_t              lock;
        struct rb_root          table;
        atomic_t                refcount;
        struct completion       comp;
        u8                      port_num;
};

struct mcast_device {
        struct ib_device        *device;
        struct ib_event_handler event_handler;
        int                     start_port;
        int                     end_port;
        struct mcast_port       port[0];
};

enum mcast_state {
        MCAST_JOINING,
        MCAST_MEMBER,
        MCAST_ERROR,
};

enum mcast_group_state {
        MCAST_IDLE,
        MCAST_BUSY,
        MCAST_GROUP_ERROR,
        MCAST_PKEY_EVENT
};

enum {
        MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
        struct ib_sa_mcmember_rec rec;
        struct rb_node          node;
        struct mcast_port       *port;
        spinlock_t              lock;
        struct work_struct      work;
        struct list_head        pending_list;
        struct list_head        active_list;
        struct mcast_member     *last_join;
        int                     members[3];
        atomic_t                refcount;
        enum mcast_group_state  state;
        struct ib_sa_query      *query;
        u16                     pkey_index;
        u8                      leave_state;
        int                     retries;
};

struct mcast_member {
        struct ib_sa_multicast  multicast;
        struct ib_sa_client     *client;
        struct mcast_group      *group;
        struct list_head        list;
        enum mcast_state        state;
        atomic_t                refcount;
        struct completion       comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context);

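/*
 * Look up a multicast group by MGID in the port's red-black tree.
 * The caller must hold port->lock.  Returns NULL if no group with a
 * matching MGID has been inserted.
 */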
static struct mcast_group *mcast_find(struct mcast_port *port,
                                      union ib_gid *mgid)
{
        struct rb_node *node = port->table.rb_node;
        struct mcast_group *group;
        int ret;

        while (node) {
                group = rb_entry(node, struct mcast_group, node);
                ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
                if (!ret)
                        return group;

                if (ret < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

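/*
 * Insert a group into the port's tree, keyed by MGID.  If a group with
 * the same MGID already exists, the existing entry is returned and no
 * insertion is done, unless allow_duplicates is set (used for MGID 0
 * requests, where the SA assigns the real MGID at join time).  The
 * caller must hold port->lock.
 */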
static struct mcast_group *mcast_insert(struct mcast_port *port,
                                        struct mcast_group *group,
                                        int allow_duplicates)
{
        struct rb_node **link = &port->table.rb_node;
        struct rb_node *parent = NULL;
        struct mcast_group *cur_group;
        int ret;

        while (*link) {
                parent = *link;
                cur_group = rb_entry(parent, struct mcast_group, node);

                ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
                             sizeof group->rec.mgid);
                if (ret < 0)
                        link = &(*link)->rb_left;
                else if (ret > 0)
                        link = &(*link)->rb_right;
                else if (allow_duplicates)
                        link = &(*link)->rb_left;
                else
                        return cur_group;
        }
        rb_link_node(&group->node, parent, link);
        rb_insert_color(&group->node, &port->table);
        return NULL;
}

static void deref_port(struct mcast_port *port)
{
        if (atomic_dec_and_test(&port->refcount))
                complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
        struct mcast_port *port = group->port;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        if (atomic_dec_and_test(&group->refcount)) {
                rb_erase(&group->node, &port->table);
                spin_unlock_irqrestore(&port->lock, flags);
                kfree(group);
                deref_port(port);
        } else
                spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
        if (atomic_dec_and_test(&member->refcount))
                complete(&member->comp);
}

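/*
 * Queue a member's join request on its group and, if the group is idle,
 * take a group reference and schedule the group's work item.  The work
 * handler drops that reference (via release_group) once the group
 * returns to the idle state.
 */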
static void queue_join(struct mcast_member *member)
{
        struct mcast_group *group = member->group;
        unsigned long flags;

        spin_lock_irqsave(&group->lock, flags);
        list_add_tail(&member->list, &group->pending_list);
        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                atomic_inc(&group->refcount);
                queue_work(mcast_wq, &group->work);
        }
        spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non-member,
 * and send-only member.  We need to keep track of the number of members of
 * each type based on their join state.  Adjust the number of members that
 * belong to the specified join states.
 */
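/*
 * members[] is indexed by join state bit: bit 0 (members[0]) counts full
 * members, bit 1 (members[1]) non-members, and bit 2 (members[2])
 * send-only non-members, matching the MCMemberRecord JoinState layout.
 */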
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
        int i;

        for (i = 0; i < 3; i++, join_state >>= 1)
                if (join_state & 0x1)
                        group->members[i] += inc;
}

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
        u8 leave_state = 0;
        int i;

        for (i = 0; i < 3; i++)
                if (!group->members[i])
                        leave_state |= (0x1 << i);

        return leave_state & group->rec.join_state;
}

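/*
 * Check one selector/value pair from a query: when both the selector and
 * the value appear in comp_mask, verify that src_value satisfies the
 * requested comparison (IB_SA_GT, IB_SA_LT, or IB_SA_EQ) against
 * dst_value.  Returns nonzero if the comparison fails.
 */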
static int check_selector(ib_sa_comp_mask comp_mask,
                          ib_sa_comp_mask selector_mask,
                          ib_sa_comp_mask value_mask,
                          u8 selector, u8 src_value, u8 dst_value)
{
        int err;

        if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
                return 0;

        switch (selector) {
        case IB_SA_GT:
                err = (src_value <= dst_value);
                break;
        case IB_SA_LT:
                err = (src_value >= dst_value);
                break;
        case IB_SA_EQ:
                err = (src_value != dst_value);
                break;
        default:
                err = 0;
                break;
        }

        return err;
}

static int cmp_rec(struct ib_sa_mcmember_rec *src,
                   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
        /* MGID must already match */

        if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
            memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
                           IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
                           src->mtu, dst->mtu))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
            src->traffic_class != dst->traffic_class)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
                           IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
                           src->rate, dst->rate))
                return -EINVAL;
        if (check_selector(comp_mask,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
                           dst->packet_life_time_selector,
                           src->packet_life_time, dst->packet_life_time))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
            src->flow_label != dst->flow_label)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
            src->hop_limit != dst->hop_limit)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
                return -EINVAL;

        /* join_state checked separately, proxy_join ignored */

        return 0;
}

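/*
 * Issue an SA SET query for the member's MCMemberRecord to join the
 * group.  join_handler() runs when the query completes; a positive
 * return from ib_sa_mcmember_rec_query is the query id, so only
 * negative values are returned as errors here.
 */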
static int send_join(struct mcast_group *group, struct mcast_member *member)
{
        struct mcast_port *port = group->port;
        int ret;

        group->last_join = member;
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_MGMT_METHOD_SET,
                                       &member->multicast.rec,
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
        return (ret > 0) ? 0 : ret;
}

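/*
 * Issue an SA DELETE for the join state bits in leave_state, dropping
 * membership the group no longer needs.  leave_handler() retries a
 * failed leave before letting the work handler continue.
 */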
static int send_leave(struct mcast_group *group, u8 leave_state)
{
        struct mcast_port *port = group->port;
        struct ib_sa_mcmember_rec rec;
        int ret;

        rec = group->rec;
        rec.join_state = leave_state;
        group->leave_state = leave_state;

        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
                                       IB_SA_MCMEMBER_REC_MGID     |
                                       IB_SA_MCMEMBER_REC_PORT_GID |
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
        return (ret > 0) ? 0 : ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
                       u8 join_state)
{
        member->state = MCAST_MEMBER;
        adjust_membership(group, join_state, 1);
        group->rec.join_state |= join_state;
        member->multicast.rec = group->rec;
        member->multicast.rec.join_state = join_state;
        list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
                     int status)
{
        spin_lock_irq(&group->lock);
        list_del_init(&member->list);
        spin_unlock_irq(&group->lock);
        return member->multicast.callback(status, &member->multicast);
}

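/*
 * Handle a port event for this group.  For a pkey change, the group is
 * left intact if its pkey index is still valid; otherwise every active
 * member is removed and notified with -ENETRESET, and the group's join
 * state is cleared so the work handler can rejoin or clean up.
 */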
static void process_group_error(struct mcast_group *group)
{
        struct mcast_member *member;
        int ret = 0;
        u16 pkey_index;

        if (group->state == MCAST_PKEY_EVENT)
                ret = ib_find_pkey(group->port->dev->device,
                                   group->port->port_num,
                                   be16_to_cpu(group->rec.pkey), &pkey_index);

        spin_lock_irq(&group->lock);
        if (group->state == MCAST_PKEY_EVENT && !ret &&
            group->pkey_index == pkey_index)
                goto out;

        while (!list_empty(&group->active_list)) {
                member = list_entry(group->active_list.next,
                                    struct mcast_member, list);
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                adjust_membership(group, member->multicast.rec.join_state, -1);
                member->state = MCAST_ERROR;
                spin_unlock_irq(&group->lock);

                ret = member->multicast.callback(-ENETRESET,
                                                 &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        group->rec.join_state = 0;
out:
        group->state = MCAST_BUSY;
        spin_unlock_irq(&group->lock);
}

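/*
 * Per-group worker.  Drains the pending list one join at a time: joins
 * compatible with the group's current SA membership complete locally,
 * anything else triggers an SA join and the worker exits until
 * join_handler() resumes it.  Error states detour through
 * process_group_error().  When nothing is pending, any join states with
 * no remaining members are left via send_leave() (leave_handler()
 * resumes the worker); otherwise the group goes idle and the work
 * reference is dropped via release_group().
 */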
static void mcast_work_handler(struct work_struct *work)
{
        struct mcast_group *group;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int status, ret;
        u8 join_state;

        group = container_of(work, typeof(*group), work);
retest:
        spin_lock_irq(&group->lock);
        while (!list_empty(&group->pending_list) ||
               (group->state != MCAST_BUSY)) {

                if (group->state != MCAST_BUSY) {
                        spin_unlock_irq(&group->lock);
                        process_group_error(group);
                        goto retest;
                }

                member = list_entry(group->pending_list.next,
                                    struct mcast_member, list);
                multicast = &member->multicast;
                join_state = multicast->rec.join_state;
                atomic_inc(&member->refcount);

                if (join_state == (group->rec.join_state & join_state)) {
                        status = cmp_rec(&group->rec, &multicast->rec,
                                         multicast->comp_mask);
                        if (!status)
                                join_group(group, member, join_state);
                        else
                                list_del_init(&member->list);
                        spin_unlock_irq(&group->lock);
                        ret = multicast->callback(status, multicast);
                } else {
                        spin_unlock_irq(&group->lock);
                        status = send_join(group, member);
                        if (!status) {
                                deref_member(member);
                                return;
                        }
                        ret = fail_join(group, member, status);
                }

                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        join_state = get_leave_state(group);
        if (join_state) {
                group->rec.join_state &= ~join_state;
                spin_unlock_irq(&group->lock);
                if (send_leave(group, join_state))
                        goto retest;
        } else {
                group->state = MCAST_IDLE;
                spin_unlock_irq(&group->lock);
                release_group(group);
        }
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
        struct mcast_member *member;
        int ret;

        spin_lock_irq(&group->lock);
        member = list_entry(group->pending_list.next,
                            struct mcast_member, list);
        if (group->last_join == member) {
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                spin_unlock_irq(&group->lock);
                ret = member->multicast.callback(status, &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
        } else
                spin_unlock_irq(&group->lock);
}

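/*
 * SA join completion callback.  On failure the pending request is
 * failed (if it is still the most recent join); on success the group
 * record is updated with the SA's reply, re-inserting the group in the
 * port tree if the SA assigned a different MGID (the MGID 0 case), and
 * the pkey index is captured for later pkey-change checks.  The work
 * handler is then resumed to process the result.
 */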
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context)
{
        struct mcast_group *group = context;
        u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

        if (status)
                process_join_error(group, status);
        else {
                int mgids_changed, is_mgid0;

                if (ib_find_pkey(group->port->dev->device,
                                 group->port->port_num, be16_to_cpu(rec->pkey),
                                 &pkey_index))
                        pkey_index = MCAST_INVALID_PKEY_INDEX;

                spin_lock_irq(&group->port->lock);
                if (group->state == MCAST_BUSY &&
                    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
                        group->pkey_index = pkey_index;
                mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
                                       sizeof(group->rec.mgid));
                group->rec = *rec;
                if (mgids_changed) {
                        rb_erase(&group->node, &group->port->table);
                        is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
                                           sizeof(mgid0));
                        mcast_insert(group->port, group, is_mgid0);
                }
                spin_unlock_irq(&group->port->lock);
        }
        mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context)
{
        struct mcast_group *group = context;

        if (status && group->retries > 0 &&
            !send_leave(group, group->leave_state))
                group->retries--;
        else
                mcast_work_handler(&group->work);
}

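/*
 * Find or allocate the group for an MGID, taking a reference on it.
 * Requests for MGID 0 always allocate a new group, since the SA will
 * assign the real MGID when the join completes.  A new group also takes
 * a reference on its port, dropped when the group is released.
 */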
static struct mcast_group *acquire_group(struct mcast_port *port,
                                         union ib_gid *mgid, gfp_t gfp_mask)
{
        struct mcast_group *group, *cur_group;
        unsigned long flags;
        int is_mgid0;

        is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
        if (!is_mgid0) {
                spin_lock_irqsave(&port->lock, flags);
                group = mcast_find(port, mgid);
                if (group)
                        goto found;
                spin_unlock_irqrestore(&port->lock, flags);
        }

        group = kzalloc(sizeof *group, gfp_mask);
        if (!group)
                return NULL;

        group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
        group->pkey_index = MCAST_INVALID_PKEY_INDEX;
        INIT_LIST_HEAD(&group->pending_list);
        INIT_LIST_HEAD(&group->active_list);
        INIT_WORK(&group->work, mcast_work_handler);
        spin_lock_init(&group->lock);

        spin_lock_irqsave(&port->lock, flags);
        cur_group = mcast_insert(port, group, is_mgid0);
        if (cur_group) {
                kfree(group);
                group = cur_group;
        } else
                atomic_inc(&port->refcount);
found:
        atomic_inc(&group->refcount);
        spin_unlock_irqrestore(&port->lock, flags);
        return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
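/*
 * Minimal usage sketch (illustrative only; my_sa_client, my_join_done and
 * ctx are hypothetical caller names): a consumer registers an ib_sa_client,
 * fills in an MCMemberRecord with at least the MGID, port GID and join
 * state, and joins with a completion callback.  A nonzero return from the
 * callback tells the core to free the multicast entry on the caller's
 * behalf:
 *
 *	static int my_join_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		return status;	// nonzero: core frees mc for us
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  comp_mask, GFP_KERNEL, my_join_done, ctx);
 */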
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
                     struct ib_device *device, u8 port_num,
                     struct ib_sa_mcmember_rec *rec,
                     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
                     int (*callback)(int status,
                                     struct ib_sa_multicast *multicast),
                     void *context)
{
        struct mcast_device *dev;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int ret;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return ERR_PTR(-ENODEV);

        member = kmalloc(sizeof *member, gfp_mask);
        if (!member)
                return ERR_PTR(-ENOMEM);

        ib_sa_client_get(client);
        member->client = client;
        member->multicast.rec = *rec;
        member->multicast.comp_mask = comp_mask;
        member->multicast.callback = callback;
        member->multicast.context = context;
        init_completion(&member->comp);
        atomic_set(&member->refcount, 1);
        member->state = MCAST_JOINING;

        member->group = acquire_group(&dev->port[port_num - dev->start_port],
                                      &rec->mgid, gfp_mask);
        if (!member->group) {
                ret = -ENOMEM;
                goto err;
        }

        /*
         * The user will get the multicast structure in their callback.  They
         * could then free the multicast structure before we can return from
         * this routine.  So we save the pointer to return before queuing
         * any callback.
         */
        multicast = &member->multicast;
        queue_join(member);
        return multicast;

err:
        ib_sa_client_put(client);
        kfree(member);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);

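/*
 * Drop a member from its group.  Membership counts are adjusted, the
 * group work item is scheduled (inheriting the caller's group reference)
 * if the group was idle so that any required leave can be sent, and the
 * call blocks until every callback still referencing the member has
 * finished before freeing it.
 */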
void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
        struct mcast_member *member;
        struct mcast_group *group;

        member = container_of(multicast, struct mcast_member, multicast);
        group = member->group;

        spin_lock_irq(&group->lock);
        if (member->state == MCAST_MEMBER)
                adjust_membership(group, multicast->rec.join_state, -1);

        list_del_init(&member->list);

        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                spin_unlock_irq(&group->lock);
                /* Continue to hold reference on group until callback */
                queue_work(mcast_wq, &group->work);
        } else {
                spin_unlock_irq(&group->lock);
                release_group(group);
        }

        deref_member(member);
        wait_for_completion(&member->comp);
        ib_sa_client_put(member->client);
        kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
                           union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        struct mcast_group *group;
        unsigned long flags;
        int ret = 0;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return -ENODEV;

        port = &dev->port[port_num - dev->start_port];
        spin_lock_irqsave(&port->lock, flags);
        group = mcast_find(port, mgid);
        if (group)
                *rec = group->rec;
        else
                ret = -EADDRNOTAVAIL;
        spin_unlock_irqrestore(&port->lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
                             struct ib_sa_mcmember_rec *rec,
                             struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        u8 p;

        ret = ib_find_cached_gid(device, &rec->port_gid,
                                 NULL, &p, &gid_index);
        if (ret)
                return ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->mlid);
        ah_attr->sl = rec->sl;
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        ah_attr->ah_flags = IB_AH_GRH;
        ah_attr->grh.dgid = rec->mgid;

        ah_attr->grh.sgid_index = (u8) gid_index;
        ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
        ah_attr->grh.hop_limit = rec->hop_limit;
        ah_attr->grh.traffic_class = rec->traffic_class;

        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

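/*
 * Propagate a port event to every group on the port: idle groups are
 * handed to the workqueue (with a reference) and, unless a group is
 * already in the error state, its state is set to the event state so
 * the worker runs process_group_error().
 */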
static void mcast_groups_event(struct mcast_port *port,
                               enum mcast_group_state state)
{
        struct mcast_group *group;
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        for (node = rb_first(&port->table); node; node = rb_next(node)) {
                group = rb_entry(node, struct mcast_group, node);
                spin_lock(&group->lock);
                if (group->state == MCAST_IDLE) {
                        atomic_inc(&group->refcount);
                        queue_work(mcast_wq, &group->work);
                }
                if (group->state != MCAST_GROUP_ERROR)
                        group->state = state;
                spin_unlock(&group->lock);
        }
        spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
                                struct ib_event *event)
{
        struct mcast_device *dev;
        int index;

        dev = container_of(handler, struct mcast_device, event_handler);
        if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
                return;

        index = event->element.port_num - dev->start_port;

        switch (event->event) {
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
        case IB_EVENT_CLIENT_REREGISTER:
                mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
                break;
        case IB_EVENT_PKEY_CHANGE:
                mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
                break;
        default:
                break;
        }
}

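/*
 * ib_client add callback: allocate per-port state for every port on the
 * device that supports IB multicast and register for port events.  The
 * device is skipped entirely if no port qualifies.
 */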
static void mcast_add_one(struct ib_device *device)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;
        int count = 0;

        dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
                      GFP_KERNEL);
        if (!dev)
                return;

        dev->start_port = rdma_start_port(device);
        dev->end_port = rdma_end_port(device);

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (!rdma_cap_ib_mcast(device, dev->start_port + i))
                        continue;
                port = &dev->port[i];
                port->dev = dev;
                port->port_num = dev->start_port + i;
                spin_lock_init(&port->lock);
                port->table = RB_ROOT;
                init_completion(&port->comp);
                atomic_set(&port->refcount, 1);
                ++count;
        }

        if (!count) {
                kfree(dev);
                return;
        }

        dev->device = device;
        ib_set_client_data(device, &mcast_client, dev);

        INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
        ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
        struct mcast_device *dev = client_data;
        struct mcast_port *port;
        int i;

        if (!dev)
                return;

        ib_unregister_event_handler(&dev->event_handler);
        flush_workqueue(mcast_wq);

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
                        port = &dev->port[i];
                        deref_port(port);
                        wait_for_completion(&port->comp);
                }
        }

        kfree(dev);
}

int mcast_init(void)
{
        int ret;

        mcast_wq = create_singlethread_workqueue("ib_mcast");
        if (!mcast_wq)
                return -ENOMEM;

        ib_sa_register_client(&sa_client);

        ret = ib_register_client(&mcast_client);
        if (ret)
                goto err;
        return 0;

err:
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
        return ret;
}

void mcast_cleanup(void)
{
        ib_unregister_client(&mcast_client);
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
}