/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will initially be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER   2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN          2

static inline bool mpath_expired(struct mesh_path *mpath)
{
        return (mpath->flags & MESH_PATH_ACTIVE) &&
               time_after(jiffies, mpath->exp_time) &&
               !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;

/* This lock has the grow-table function as its writer and the add/delete
 * operations as readers. RCU provides sufficient protection only when reading
 * the table (i.e. doing lookups).  Adding or removing nodes requires that we
 * take the read lock, or we risk operating on an old table.  The write lock
 * is only needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
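
/*
 * A minimal sketch of the locking pattern described above (illustrative;
 * mesh_path_add() and mesh_mpath_table_grow() below are the real users):
 *
 *      // adding or removing an entry: read lock plus the bucket lock
 *      read_lock_bh(&pathtbl_resize_lock);
 *      tbl = resize_dereference_mesh_paths();
 *      spin_lock(&tbl->hashwlock[hash_idx]);
 *      ...modify one bucket...
 *      spin_unlock(&tbl->hashwlock[hash_idx]);
 *      read_unlock_bh(&pathtbl_resize_lock);
 *
 *      // changing the number of buckets: exclusive write lock
 *      write_lock_bh(&pathtbl_resize_lock);
 *      ...swap in a resized table...
 *      write_unlock_bh(&pathtbl_resize_lock);
 */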


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return rcu_dereference_protected(mesh_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return rcu_dereference_protected(mpp_paths,
                lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *      for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)

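/*
 * Correct usage therefore dereferences the table into a local variable
 * first (a sketch; mesh_plink_broken() below follows this pattern):
 *
 *      struct mesh_table *tbl;
 *      struct mpath_node *node;
 *      int i;
 *
 *      rcu_read_lock();
 *      tbl = rcu_dereference(mesh_paths);
 *      for_each_mesh_entry(tbl, node, i)
 *              ...look at node->mpath...
 *      rcu_read_unlock();
 */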

static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        if (free_leafs) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, q,
                                         tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
                            tbl->hash_rnd) & tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
        skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
        }

        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
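
/*
 * A sketch of the expected calling pattern (illustrative;
 * mesh_path_fix_nexthop() below follows it, as does the HWMP code): take
 * state_lock, assign the next hop, activate the path, then drop the lock
 * before flushing any pending frames.
 *
 *      spin_lock_bh(&mpath->state_lock);
 *      mesh_path_assign_nexthop(mpath, sta);
 *      mesh_path_activate(mpath);
 *      spin_unlock_bh(&mpath->state_lock);
 *      mesh_path_tx_pending(mpath);
 */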

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *fskb, *tmp;
        struct sk_buff_head failq;
        unsigned long flags;

        if (WARN_ON(gate_mpath == from_mpath))
                return;
        if (WARN_ON(!gate_mpath->next_hop))
                return;

        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        skb_queue_walk_safe(&failq, fskb, tmp) {
                if (skb_queue_len(&gate_mpath->frame_queue) >=
                                  MESH_FRAME_QUEUE_LEN) {
                        mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
                        break;
                }

                skb = skb_copy(fskb, GFP_ATOMIC);
                if (WARN_ON(!skb))
                        break;

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                skb_queue_tail(&gate_mpath->frame_queue, skb);

                if (copy)
                        continue;

                __skb_unlink(fskb, &failq);
                kfree_skb(fskb);
        }

        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst)) {
                        if (mpath_expired(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
        return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
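
/*
 * A minimal lookup sketch (illustrative): the returned pointer is only
 * valid inside the RCU read-side section, so any use of the path or its
 * next hop must happen before rcu_read_unlock().
 *
 *      rcu_read_lock();
 *      mpath = mesh_path_lookup(sdata, dst);
 *      if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *              sta = rcu_dereference(mpath->next_hop);
 *      ...
 *      rcu_read_unlock();
 */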


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (mpath_expired(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
        struct mesh_table *tbl = rcu_dereference(mpp_paths);
        struct mpath_node *node;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx)
                        return node->mpath;
        }

        return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a gate to our path table
 * @mpath: gate path to add to the table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);

        hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        mpath_dbg(mpath->sdata,
                  "Mesh path: Recorded new gate: %pM. %d known gates\n",
                  mpath->dst, mpath->sdata->u.mesh.num_gates);
        err = 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *q;

        hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
                if (gate->mpath != mpath)
                        continue;
                spin_lock_bh(&tbl->gates_lock);
                hlist_del_rcu(&gate->list);
                kfree_rcu(gate, rcu);
                spin_unlock_bh(&tbl->gates_lock);
                mpath->sdata->u.mesh.num_gates--;
                mpath->is_gate = false;
                mpath_dbg(mpath->sdata,
                          "Mesh path: Deleted gate: %pM. %d known gates\n",
                          mpath->dst, mpath->sdata->u.mesh.num_gates);
                break;
        }
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or preexisting) mesh path on success, or an ERR_PTR on
 * failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                                const u8 *dst)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        int grow = 0;
        int err;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return ERR_PTR(-ENOTSUPP);

        if (is_multicast_ether_addr(dst))
                return ERR_PTR(-ENOTSUPP);

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return ERR_PTR(-ENOSPC);

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto found;
        }

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        memcpy(new_mpath->dst, dst, ETH_ALEN);
        eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        mpath = new_mpath;
found:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return mpath;

err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return ERR_PTR(err);
}
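
/*
 * A sketch of a typical caller (illustrative; the HWMP next-hop resolution
 * code does something similar): look the path up first and only allocate
 * on a miss, checking the ERR_PTR-encoded return value.
 *
 *      mpath = mesh_path_lookup(sdata, dst);
 *      if (!mpath) {
 *              mpath = mesh_path_add(sdata, dst);
 *              if (IS_ERR(mpath)) {
 *                      mesh_path_discard_frame(sdata, skb);
 *                      return;
 *              }
 *      }
 */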

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}
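
/*
 * The grow path above is the classic RCU replace-and-reclaim sequence:
 * allocate a table twice the size, copy the nodes across while readers
 * keep using the old table, publish the new table, and defer freeing the
 * old one until all readers are done.  Freeing with free_leafs == false
 * releases the old table and its mpath_node wrappers but keeps the
 * mesh_path leaves alive, since the new table's nodes point at the same
 * mesh_path structures (the indirection noted in struct mpath_node):
 *
 *      rcu_assign_pointer(mesh_paths, newtbl);
 *      call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
 */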

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
                 const u8 *dst, const u8 *mpp)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            tbl->mean_chain_len * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);

        mpp_paths_generation++;

        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_access_pointer(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata,
                                sdata->u.mesh.mshcfg.element_ttl,
                                mpath->dst, mpath->sn,
                                WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        del_timer_sync(&node->mpath->timer);
        atomic_dec(&sdata->u.mesh.mpaths);
        kfree(node->mpath);
        kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath;
        mpath = node->mpath;
        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_access_pointer(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        int hash_idx;
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(addr, mpath->dst)) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;

        if (!known_gates) {
                rcu_read_unlock();
                return -EHOSTUNREACH;
        }

        /* hold the RCU read lock across the whole list walk, since gates
         * may be removed concurrently via hlist_del_rcu()/kfree_rcu() */
        hlist_for_each_entry_rcu(gate, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg(sdata,
                                  "Not forwarding %p (flags %#x)\n",
                                  gate->mpath, gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }
        rcu_read_unlock();

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
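
/*
 * A sketch of the expected caller (illustrative; the HWMP path timer does
 * roughly this when path discovery finally gives up): fall back to the
 * known gates if any exist, and drop the queued frames otherwise.
 *
 *      if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *              if (mesh_path_send_to_gates(mpath))
 *                      mesh_path_flush_pending(mpath);
 *      } else {
 *              mesh_path_flush_pending(mpath);
 *      }
 */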

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
                             struct sk_buff *skb)
{
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without that lock held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                        &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}
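
/*
 * A sketch of how init and teardown pair up (illustrative; the actual call
 * sites live in the mesh setup and subsystem exit code elsewhere in
 * mac80211):
 *
 *      ret = mesh_pathtbl_init();
 *      if (ret)
 *              return ret;
 *      ...
 *      mesh_pathtbl_unregister();
 */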

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->sdata, mpath->dst);
        }
        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}