These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / staging / lustre / lustre / libcfs / hash.c
index a55567e..0308744 100644
@@ -159,7 +159,7 @@ cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
 }
 
 /** No lock hash */
-static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
+static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
@@ -167,7 +167,7 @@ static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
 };
 
 /** no bucket lock, one spinlock to protect everything */
-static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
+static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
@@ -175,7 +175,7 @@ static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
 };
 
 /** spin bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
+static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
@@ -183,7 +183,7 @@ static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
 };
 
 /** rw bucket lock, rehash is enabled */
-static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
+static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
@@ -191,7 +191,7 @@ static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
 };
 
 /** spin bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
@@ -199,7 +199,7 @@ static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
 };
 
 /** rw bucket lock, rehash is disabled */
-static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
+static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
@@ -239,21 +239,22 @@ cfs_hash_lock_setup(struct cfs_hash *hs)
  * Simple hash head without depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head {
        struct hlist_head       hh_head;        /**< entries list */
-} cfs_hash_head_t;
+};
 
 static int
 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_t);
+       return sizeof(struct cfs_hash_head);
 }
 
 static struct hlist_head *
 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
+       struct cfs_hash_head *head;
 
+       head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hh_head;
 }
 
@@ -277,23 +278,23 @@ cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
  * Simple hash head with depth tracking
  * new element is always added to head of hlist
  */
-typedef struct {
+struct cfs_hash_head_dep {
        struct hlist_head       hd_head;        /**< entries list */
-       unsigned int        hd_depth;       /**< list length */
-} cfs_hash_head_dep_t;
+       unsigned int            hd_depth;       /**< list length */
+};
 
 static int
 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_head_dep_t);
+       return sizeof(struct cfs_hash_head_dep);
 }
 
 static struct hlist_head *
 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_head_dep_t   *head;
+       struct cfs_hash_head_dep   *head;
 
-       head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
 }
 
@@ -301,8 +302,10 @@ static int
 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
 }
@@ -311,8 +314,10 @@ static int
 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
-                                              cfs_hash_head_dep_t, hd_head);
+       struct cfs_hash_head_dep *hh;
+
+       hh = container_of(cfs_hash_hd_hhead(hs, bd),
+                         struct cfs_hash_head_dep, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
 }
@@ -321,23 +326,23 @@ cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
  * double links hash head without depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
-} cfs_hash_dhead_t;
+};
 
 static int
 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_t);
+       return sizeof(struct cfs_hash_dhead);
 }
 
 static struct hlist_head *
 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_t *head;
+       struct cfs_hash_dhead *head;
 
-       head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
 }
 
@@ -345,9 +350,10 @@ static int
 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dh_tail);
        else /* empty list */
@@ -360,9 +366,10 @@ static int
 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
-                                           cfs_hash_dhead_t, dh_head);
+       struct cfs_hash_dhead *dh;
 
+       dh = container_of(cfs_hash_dh_hhead(hs, bd),
+                         struct cfs_hash_dhead, dh_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -375,24 +382,24 @@ cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
  * double links hash head with depth tracking
  * new element is always added to tail of hlist
  */
-typedef struct {
+struct cfs_hash_dhead_dep {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int        dd_depth;       /**< list length */
-} cfs_hash_dhead_dep_t;
+};
 
 static int
 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
 {
-       return sizeof(cfs_hash_dhead_dep_t);
+       return sizeof(struct cfs_hash_dhead_dep);
 }
 
 static struct hlist_head *
 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 {
-       cfs_hash_dhead_dep_t *head;
+       struct cfs_hash_dhead_dep *head;
 
-       head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
+       head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
 }
 
@@ -400,9 +407,10 @@ static int
 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnode)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_behind(hnode, dh->dd_tail);
        else /* empty list */
@@ -415,9 +423,10 @@ static int
 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                      struct hlist_node *hnd)
 {
-       cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
-                                               cfs_hash_dhead_dep_t, dd_head);
+       struct cfs_hash_dhead_dep *dh;
 
+       dh = container_of(cfs_hash_dd_hhead(hs, bd),
+                         struct cfs_hash_dhead_dep, dd_head);
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
@@ -426,32 +435,32 @@ cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
        return --dh->dd_depth;
 }
 
-static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
-       .hop_hhead      = cfs_hash_hh_hhead,
-       .hop_hhead_size = cfs_hash_hh_hhead_size,
-       .hop_hnode_add  = cfs_hash_hh_hnode_add,
-       .hop_hnode_del  = cfs_hash_hh_hnode_del,
+static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
+       .hop_hhead      = cfs_hash_hh_hhead,
+       .hop_hhead_size = cfs_hash_hh_hhead_size,
+       .hop_hnode_add  = cfs_hash_hh_hnode_add,
+       .hop_hnode_del  = cfs_hash_hh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
-       .hop_hhead      = cfs_hash_hd_hhead,
-       .hop_hhead_size = cfs_hash_hd_hhead_size,
-       .hop_hnode_add  = cfs_hash_hd_hnode_add,
-       .hop_hnode_del  = cfs_hash_hd_hnode_del,
+static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
+       .hop_hhead      = cfs_hash_hd_hhead,
+       .hop_hhead_size = cfs_hash_hd_hhead_size,
+       .hop_hnode_add  = cfs_hash_hd_hnode_add,
+       .hop_hnode_del  = cfs_hash_hd_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
-       .hop_hhead      = cfs_hash_dh_hhead,
-       .hop_hhead_size = cfs_hash_dh_hhead_size,
-       .hop_hnode_add  = cfs_hash_dh_hnode_add,
-       .hop_hnode_del  = cfs_hash_dh_hnode_del,
+static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
+       .hop_hhead      = cfs_hash_dh_hhead,
+       .hop_hhead_size = cfs_hash_dh_hhead_size,
+       .hop_hnode_add  = cfs_hash_dh_hnode_add,
+       .hop_hnode_del  = cfs_hash_dh_hnode_del,
 };
 
-static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
-       .hop_hhead      = cfs_hash_dd_hhead,
-       .hop_hhead_size = cfs_hash_dd_hhead_size,
-       .hop_hnode_add  = cfs_hash_dd_hnode_add,
-       .hop_hnode_del  = cfs_hash_dd_hnode_del,
+static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
+       .hop_hhead      = cfs_hash_dd_hhead,
+       .hop_hhead_size = cfs_hash_dd_hhead_size,
+       .hop_hnode_add  = cfs_hash_dd_hnode_add,
+       .hop_hnode_del  = cfs_hash_dd_hnode_del,
 };
 
 static void
@@ -588,16 +597,16 @@ EXPORT_SYMBOL(cfs_hash_bd_move_locked);
 
 enum {
        /** always set, for sanity (avoid ZERO intent) */
-       CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
+       CFS_HS_LOOKUP_MASK_FIND     = BIT(0),
        /** return entry with a ref */
-       CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
+       CFS_HS_LOOKUP_MASK_REF      = BIT(1),
        /** add entry if not existing */
-       CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
+       CFS_HS_LOOKUP_MASK_ADD      = BIT(2),
        /** delete entry, ignore other masks */
-       CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
+       CFS_HS_LOOKUP_MASK_DEL      = BIT(3),
 };
 
-typedef enum cfs_hash_lookup_intent {
+enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
@@ -612,12 +621,12 @@ typedef enum cfs_hash_lookup_intent {
        /** delete if existed */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
-} cfs_hash_lookup_intent_t;
+};
 
 static struct hlist_node *
 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key, struct hlist_node *hnode,
-                         cfs_hash_lookup_intent_t intent)
+                         enum cfs_hash_lookup_intent intent)
 
 {
        struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
@@ -677,8 +686,8 @@ cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                           int noref)
 {
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
-                                        CFS_HS_LOOKUP_IT_ADD |
-                                        (!noref * CFS_HS_LOOKUP_MASK_REF));
+                                        (!noref * CFS_HS_LOOKUP_MASK_REF) |
+                                        CFS_HS_LOOKUP_IT_ADD);
 }
 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
 
@@ -756,7 +765,7 @@ cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
        unsigned           i;
 
        LASSERT(hnode != NULL);
-       intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
+       intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
 
        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
@@ -1019,7 +1028,7 @@ struct cfs_hash *
 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
-               cfs_hash_ops_t *ops, unsigned flags)
+               struct cfs_hash_ops *ops, unsigned flags)
 {
        struct cfs_hash *hs;
        int      len;
@@ -1480,16 +1489,16 @@ cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
        return count;
 }
 
-typedef struct {
-       cfs_hash_cond_opt_cb_t  func;
-       void               *arg;
-} cfs_hash_cond_arg_t;
+struct cfs_hash_cond_arg {
+       cfs_hash_cond_opt_cb_t  func;
+       void                    *arg;
+};
 
 static int
 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode, void *data)
 {
-       cfs_hash_cond_arg_t *cond = data;
+       struct cfs_hash_cond_arg *cond = data;
 
        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
@@ -1504,7 +1513,7 @@ cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 void
 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
 {
-       cfs_hash_cond_arg_t arg = {
+       struct cfs_hash_cond_arg arg = {
                .func   = func,
                .arg    = data,
        };
@@ -1623,8 +1632,12 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
                                if (rc) /* callback wants to break iteration */
                                        break;
                        }
+                       if (rc) /* callback wants to break iteration */
+                               break;
                }
                cfs_hash_bd_unlock(hs, &bd, 0);
+               if (rc) /* callback wants to break iteration */
+                       break;
        }
        cfs_hash_unlock(hs, 0);
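
The final hunk changes cfs_hash_for_each_relax() so that a non-zero return from the iteration callback breaks out of the enclosing bucket and hash-table loops as well, not just the innermost hlist walk. The standalone sketch below (not part of the patch; the function and type names are illustrative only) shows the same break-propagation pattern in isolation: rc is rechecked after each loop level so a single "stop" request ends the whole walk.

#include <stdio.h>

/* Hypothetical callback type: returns non-zero to stop the walk early. */
typedef int (*walk_cb_t)(int item, void *arg);

/*
 * Walk a two-level structure (buckets of items) and propagate a
 * "stop" request from the callback out of every loop level. Checking
 * rc only in the inner loop would merely move on to the next bucket,
 * which is the behaviour the patch above fixes.
 */
static int walk_buckets(int buckets[][4], int nbkt, walk_cb_t cb, void *arg)
{
	int rc = 0;
	int i, j;

	for (i = 0; i < nbkt; i++) {
		for (j = 0; j < 4; j++) {
			rc = cb(buckets[i][j], arg);
			if (rc) /* callback wants to break iteration */
				break;
		}
		if (rc) /* propagate the break to the outer loop */
			break;
	}
	return rc;
}

/* Example callback: stop as soon as the target value is seen. */
static int find_target(int item, void *arg)
{
	int target = *(int *)arg;

	printf("visiting %d\n", item);
	return item == target;
}

int main(void)
{
	int buckets[2][4] = { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
	int target = 3;

	/* Visits 1, 2, 3 and then stops; bucket 1 is never touched. */
	walk_buckets(buckets, 2, find_target, &target);
	return 0;
}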