2 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3 * Copyright (C) 2002 by Concurrent Computer Corporation
4 * Distributed under the GNU GPL license version 2.
6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks.
9 * Modified by Nadia Derbey to make it RCU safe.
11 * Small id to pointer translation service.
13 * It uses a radix tree like structure as a sparse array indexed
14 * by the id to obtain the pointer. The bitmap makes allocating a new id quick.
17 * You call it to allocate an id (an int) and associate a pointer (or
18 * whatever, we treat it as a (void *)) with that id. You can pass this
19 * id to a user for them to pass back at a later time. You then pass
20 * that id to this code and it returns your pointer.
23 #ifndef TEST // to test in user space...
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/export.h>
28 #include <linux/err.h>
29 #include <linux/string.h>
30 #include <linux/idr.h>
31 #include <linux/spinlock.h>
32 #include <linux/percpu.h>
33 #include <linux/locallock.h>
35 #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
36 #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
38 /* Leave the possibility of an incomplete final layer */
39 #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
41 /* Number of id_layer structs to leave in free list */
42 #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
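/*
 * Illustrative arithmetic, assuming IDR_BITS == 8 (as in <linux/idr.h> of
 * this era) and a 32-bit int, the macros above work out to:
 *
 *	MAX_IDR_SHIFT = 32 - 1       = 31
 *	MAX_IDR_BIT   = 1U << 31     = 0x80000000
 *	MAX_IDR_LEVEL = (31 + 7) / 8 = 4 layers
 *	MAX_IDR_FREE  = 4 * 2        = 8 cached idr_layers
 */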
44 static struct kmem_cache *idr_layer_cache;
45 static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
46 static DEFINE_PER_CPU(int, idr_preload_cnt);
47 static DEFINE_SPINLOCK(simple_ida_lock);
49 /* the maximum ID which can be allocated given idr->layers */
50 static int idr_max(int layers)
52 int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
54 return (1 << bits) - 1;
58 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
59 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and so on.
62 static int idr_layer_prefix_mask(int layer)
64 return ~idr_max(layer + 1);
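/*
 * Worked examples, assuming IDR_BITS == 8: idr_max(1) == 255,
 * idr_max(2) == 65535, and idr_max(4) clamps the shift to MAX_IDR_SHIFT,
 * yielding INT_MAX.  Correspondingly, idr_layer_prefix_mask(0) == 0xffffff00
 * and idr_layer_prefix_mask(1) == 0xffff0000.
 */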
67 static struct idr_layer *get_from_free_list(struct idr *idp)
72 spin_lock_irqsave(&idp->lock, flags);
73 if ((p = idp->id_free)) {
74 idp->id_free = p->ary[0];
78 spin_unlock_irqrestore(&idp->lock, flags);
83 * idr_layer_alloc - allocate a new idr_layer
84 * @gfp_mask: allocation mask
85 * @layer_idr: optional idr to allocate from
87 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
88 * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
89 * an idr_layer from @layer_idr->id_free.
91 * @layer_idr is to maintain backward compatibility with the old alloc
92 * interface - idr_pre_get() and idr_get_new*() - and will be removed
93 * together with per-pool preload buffer.
95 static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
97 struct idr_layer *new;
99 /* this is the old path (a @layer_idr was supplied), bypass to get_from_free_list() */
101 return get_from_free_list(layer_idr);
104 * Try to allocate directly from kmem_cache. We want to try this
105 * before preload buffer; otherwise, non-preloading idr_alloc()
106 * users will end up taking advantage of preloading ones. As the
107 * following is allowed to fail for preloaded cases, suppress the failure warning this time.
110 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
115 * Try to fetch one from the per-cpu preload buffer if in process
116 * context. See idr_preload() for details.
118 if (!in_interrupt()) {
120 new = __this_cpu_read(idr_preload_head);
122 __this_cpu_write(idr_preload_head, new->ary[0]);
123 __this_cpu_dec(idr_preload_cnt);
132 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
133 * that memory allocation failure warning is printed as intended.
135 return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
138 static void idr_layer_rcu_free(struct rcu_head *head)
140 struct idr_layer *layer;
142 layer = container_of(head, struct idr_layer, rcu_head);
143 kmem_cache_free(idr_layer_cache, layer);
146 static inline void free_layer(struct idr *idr, struct idr_layer *p)
149 RCU_INIT_POINTER(idr->hint, NULL);
150 call_rcu(&p->rcu_head, idr_layer_rcu_free);
153 /* only called when idp->lock is held */
154 static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
156 p->ary[0] = idp->id_free;
161 static void move_to_free_list(struct idr *idp, struct idr_layer *p)
166 * Depends on the return element being zeroed.
168 spin_lock_irqsave(&idp->lock, flags);
169 __move_to_free_list(idp, p);
170 spin_unlock_irqrestore(&idp->lock, flags);
173 static void idr_mark_full(struct idr_layer **pa, int id)
175 struct idr_layer *p = pa[0];
178 __set_bit(id & IDR_MASK, p->bitmap);
180 * If this layer is full mark the bit in the layer above to
181 * show that this part of the radix tree is full. This may
182 * complete the layer above and require walking up the radix tree.
185 while (bitmap_full(p->bitmap, IDR_SIZE)) {
189 __set_bit((id & IDR_MASK), p->bitmap);
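/*
 * Illustrative example, assuming IDR_BITS == 8: once a leaf's 256 slots are
 * all allocated (e.g. ids 0..255), the loop above sets bit 0 in the parent
 * layer's bitmap, so the next allocation skips this subtree and descends
 * into a fresh leaf.
 */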
193 static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
195 while (idp->id_free_cnt < MAX_IDR_FREE) {
196 struct idr_layer *new;
197 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
200 move_to_free_list(idp, new);
206 * sub_alloc - try to allocate an id without growing the tree depth
208 * @starting_id: id to start search at
209 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
210 * @gfp_mask: allocation mask for idr_layer_alloc()
211 * @layer_idr: optional idr passed to idr_layer_alloc()
213 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
214 * growing its depth. Returns
216 * the allocated id >= 0 if successful,
217 * -EAGAIN if the tree needs to grow for allocation to succeed,
218 * -ENOSPC if the id space is exhausted,
219 * -ENOMEM if more idr_layers need to be allocated.
221 static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
222 gfp_t gfp_mask, struct idr *layer_idr)
225 struct idr_layer *p, *new;
235 * We loop around this while() until we reach the leaf node...
237 n = (id >> (IDR_BITS*l)) & IDR_MASK;
238 m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
240 /* no space available, go back to the previous layer. */
243 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
245 /* if already at the top layer, we need to grow */
246 if (id > idr_max(idp->layers)) {
253 /* If we need to go up one layer, continue the
254 * loop; otherwise, restart from the top.
256 sh = IDR_BITS * (l + 1);
257 if (oid >> sh == id >> sh)
264 id = ((id >> sh) ^ n ^ m) << sh;
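	/*
	 * Illustrative example, assuming IDR_BITS == 8: the xor/shift above
	 * replaces this level's index bits (n) with the free slot found (m)
	 * and clears all lower-level bits.  With l == 1 (sh == 8),
	 * id == 0x1234, n == 0x12 and m == 0x15, the new id becomes
	 * ((0x12 ^ 0x12 ^ 0x15) << 8) == 0x1500, the first id of the free
	 * subtree that was found.
	 */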
266 if ((id >= MAX_IDR_BIT) || (id < 0))
271 * Create the layer below if it is missing.
274 new = idr_layer_alloc(gfp_mask, layer_idr);
278 new->prefix = id & idr_layer_prefix_mask(new->layer);
279 rcu_assign_pointer(p->ary[m], new);
290 static int idr_get_empty_slot(struct idr *idp, int starting_id,
291 struct idr_layer **pa, gfp_t gfp_mask,
292 struct idr *layer_idr)
294 struct idr_layer *p, *new;
301 layers = idp->layers;
303 if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
309 * Add a new layer to the top of the tree if the requested
310 * id is larger than the currently allocated space.
312 while (id > idr_max(layers)) {
315 /* special case: if the tree is currently empty,
316 * then we grow the tree by moving the top node upwards.
320 WARN_ON_ONCE(p->prefix);
323 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
325 * The allocation failed. If we built part of
326 * the structure, tear it down.
328 spin_lock_irqsave(&idp->lock, flags);
329 for (new = p; p && p != idp->top; new = p) {
333 bitmap_clear(new->bitmap, 0, IDR_SIZE);
334 __move_to_free_list(idp, new);
336 spin_unlock_irqrestore(&idp->lock, flags);
341 new->layer = layers-1;
342 new->prefix = id & idr_layer_prefix_mask(new->layer);
343 if (bitmap_full(p->bitmap, IDR_SIZE))
344 __set_bit(0, new->bitmap);
347 rcu_assign_pointer(idp->top, p);
348 idp->layers = layers;
349 v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
356 * @id and @pa are from a successful allocation from idr_get_empty_slot().
357 * Install the user pointer @ptr and mark the slot full.
359 static void idr_fill_slot(struct idr *idr, void *ptr, int id,
360 struct idr_layer **pa)
362 /* update hint used for lookup, cleared from free_layer() */
363 rcu_assign_pointer(idr->hint, pa[0]);
365 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
367 idr_mark_full(pa, id);
370 #ifdef CONFIG_PREEMPT_RT_FULL
371 static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
373 static inline void idr_preload_lock(void)
375 local_lock(idr_lock);
378 static inline void idr_preload_unlock(void)
380 local_unlock(idr_lock);
383 void idr_preload_end(void)
385 idr_preload_unlock();
387 EXPORT_SYMBOL(idr_preload_end);
389 static inline void idr_preload_lock(void)	/* !PREEMPT_RT_FULL: preempt_disable() */
394 static inline void idr_preload_unlock(void)	/* !PREEMPT_RT_FULL: preempt_enable() */
401 * idr_preload - preload for idr_alloc()
402 * @gfp_mask: allocation mask to use for preloading
404 * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
405 * process context and each idr_preload() invocation should be matched with
406 * idr_preload_end(). Note that preemption is disabled while preloaded.
408 * The first idr_alloc() in the preloaded section can be treated as if it
409 * were invoked with @gfp_mask used for preloading. This allows using more
410 * permissive allocation masks for idrs protected by spinlocks.
412 * For example, if idr_alloc() below fails, the failure can be treated as
413 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
415 * idr_preload(GFP_KERNEL);
418 * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
425 void idr_preload(gfp_t gfp_mask)
428 * Consuming preload buffer from non-process context breaks preload
429 * allocation guarantee. Disallow usage from those contexts.
431 WARN_ON_ONCE(in_interrupt());
432 might_sleep_if(gfp_mask & __GFP_WAIT);
437 * idr_alloc() is likely to succeed even without a full idr_layer buffer,
438 * and the return value of idr_alloc() needs to be checked for failure
439 * anyway. Silently give up if allocation fails. The caller can
440 * treat failures from idr_alloc() as if idr_alloc() were called
441 * with @gfp_mask, which should be enough.
443 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
444 struct idr_layer *new;
446 idr_preload_unlock();
447 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
452 /* link the new one to per-cpu preload list */
453 new->ary[0] = __this_cpu_read(idr_preload_head);
454 __this_cpu_write(idr_preload_head, new);
455 __this_cpu_inc(idr_preload_cnt);
458 EXPORT_SYMBOL(idr_preload);
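/*
 * A complete version of the pattern sketched in the comment above; "lock" is
 * whatever spinlock (hypothetical here) protects the idr:
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(lock);
 *
 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 *
 *	spin_unlock(lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		error;
 */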
461 * idr_alloc - allocate new idr entry
462 * @idr: the (initialized) idr
463 * @ptr: pointer to be associated with the new id
464 * @start: the minimum id (inclusive)
465 * @end: the maximum id (exclusive, <= 0 for max)
466 * @gfp_mask: memory allocation flags
468 * Allocate an id in [start, end) and associate it with @ptr. If no ID is
469 * available in the specified range, returns -ENOSPC. On memory allocation
470 * failure, returns -ENOMEM.
472 * Note that @end is treated as max when <= 0. This is to always allow
473 * using @start + N as @end as long as N is inside integer range.
475 * The user is responsible for exclusively synchronizing all operations
476 * which may modify @idr. However, read-only accesses such as idr_find()
477 * or iteration can be performed under the RCU read lock, provided the user
478 * destroys @ptr in an RCU-safe way after removal from the idr.
480 int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
482 int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
483 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
486 might_sleep_if(gfp_mask & __GFP_WAIT);
489 if (WARN_ON_ONCE(start < 0))
491 if (unlikely(max < start))
495 id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
496 if (unlikely(id < 0))
498 if (unlikely(id > max))
501 idr_fill_slot(idr, ptr, id, pa);
504 EXPORT_SYMBOL_GPL(idr_alloc);
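/*
 * Illustrative usage sketch; "foo_idr", "foo_mutex" and "foo" are
 * hypothetical, and all modifying calls are serialized by the caller as
 * required above.  Allocation asks for any id >= 1; the lookup may instead
 * be done under rcu_read_lock():
 *
 *	static DEFINE_IDR(foo_idr);
 *	static DEFINE_MUTEX(foo_mutex);
 *
 *	mutex_lock(&foo_mutex);
 *	id = idr_alloc(&foo_idr, foo, 1, 0, GFP_KERNEL);
 *	mutex_unlock(&foo_mutex);
 *
 *	foo = idr_find(&foo_idr, id);
 *
 *	mutex_lock(&foo_mutex);
 *	idr_remove(&foo_idr, id);
 *	mutex_unlock(&foo_mutex);
 */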
507 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
508 * @idr: the (initialized) idr
509 * @ptr: pointer to be associated with the new id
510 * @start: the minimum id (inclusive)
511 * @end: the maximum id (exclusive, <= 0 for max)
512 * @gfp_mask: memory allocation flags
514 * Essentially the same as idr_alloc, but prefers to allocate progressively
515 * higher ids if it can. If the "cur" counter wraps, then it will start again
516 * at the "start" end of the range and allocate one that has already been used.
518 int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
523 id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
525 id = idr_alloc(idr, ptr, start, end, gfp_mask);
531 EXPORT_SYMBOL(idr_alloc_cyclic);
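/*
 * Illustrative behaviour note: with @start == 0, three successive
 * idr_alloc_cyclic() calls typically return 0, 1 and 2.  If id 1 is then
 * removed, the next cyclic allocation still returns 3 rather than reusing 1,
 * because the search resumes from idr->cur; freed lower ids are only handed
 * out again after the counter wraps.
 */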
533 static void idr_remove_warning(int id)
535 WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
538 static void sub_remove(struct idr *idp, int shift, int id)
540 struct idr_layer *p = idp->top;
541 struct idr_layer **pa[MAX_IDR_LEVEL + 1];
542 struct idr_layer ***paa = &pa[0];
543 struct idr_layer *to_free;
549 while ((shift > 0) && p) {
550 n = (id >> shift) & IDR_MASK;
551 __clear_bit(n, p->bitmap);
557 if (likely(p != NULL && test_bit(n, p->bitmap))) {
558 __clear_bit(n, p->bitmap);
559 RCU_INIT_POINTER(p->ary[n], NULL);
561 while (*paa && !--((**paa)->count)) {
563 free_layer(idp, to_free);
570 free_layer(idp, to_free);
572 idr_remove_warning(id);
576 * idr_remove - remove the given id and free its slot
580 void idr_remove(struct idr *idp, int id)
583 struct idr_layer *to_free;
588 if (id > idr_max(idp->layers)) {
589 idr_remove_warning(id);
593 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
594 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
597 * Single child at leftmost slot: we can shrink the tree.
598 * This level is not needed anymore since when layers are
599 * inserted, they are inserted at the top of the existing tree.
603 p = idp->top->ary[0];
604 rcu_assign_pointer(idp->top, p);
607 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
608 free_layer(idp, to_free);
611 EXPORT_SYMBOL(idr_remove);
613 static void __idr_remove_all(struct idr *idp)
618 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
619 struct idr_layer **paa = &pa[0];
621 n = idp->layers * IDR_BITS;
623 RCU_INIT_POINTER(idp->top, NULL);
624 max = idr_max(idp->layers);
627 while (id >= 0 && id <= max) {
629 while (n > IDR_BITS && p) {
631 p = p->ary[(id >> n) & IDR_MASK];
637 /* Get the highest bit that the above add changed from 0->1. */
638 while (n < fls(id ^ bt_mask)) {
640 free_layer(idp, *paa);
649 * idr_destroy - release all cached layers within an idr tree
652 * Free all id mappings and all idr_layers. After this function, @idp is
653 * completely unused and can be freed / recycled. The caller is
654 * responsible for ensuring that no one else accesses @idp during or after idr_destroy().
657 * A typical clean-up sequence for objects stored in an idr tree will use
658 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
659 * free up the id mappings and cached idr_layers.
661 void idr_destroy(struct idr *idp)
663 __idr_remove_all(idp);
665 while (idp->id_free_cnt) {
666 struct idr_layer *p = get_from_free_list(idp);
667 kmem_cache_free(idr_layer_cache, p);
670 EXPORT_SYMBOL(idr_destroy);
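/*
 * Illustrative clean-up sketch following the sequence described above;
 * "foo_idr" and free_foo() are hypothetical:
 *
 *	static int free_foo(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&foo_idr, free_foo, NULL);
 *	idr_destroy(&foo_idr);
 */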
672 void *idr_find_slowpath(struct idr *idp, int id)
680 p = rcu_dereference_raw(idp->top);
683 n = (p->layer+1) * IDR_BITS;
685 if (id > idr_max(p->layer + 1))
691 BUG_ON(n != p->layer*IDR_BITS);
692 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
696 EXPORT_SYMBOL(idr_find_slowpath);
699 * idr_for_each - iterate through all stored pointers
701 * @fn: function to be called for each pointer
702 * @data: data passed back to callback function
704 * Iterate over the pointers registered with the given idr. The
705 * callback function will be called for each pointer currently
706 * registered, passing the id, the pointer and the data pointer passed
707 * to this function. It is not safe to modify the idr tree while in
708 * the callback, so functions such as idr_get_new and idr_remove are not allowed.
711 * We check the return of @fn each time. If it returns anything other
712 * than %0, we break out and return that value.
714 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
716 int idr_for_each(struct idr *idp,
717 int (*fn)(int id, void *p, void *data), void *data)
719 int n, id, max, error = 0;
721 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
722 struct idr_layer **paa = &pa[0];
724 n = idp->layers * IDR_BITS;
725 *paa = rcu_dereference_raw(idp->top);
726 max = idr_max(idp->layers);
729 while (id >= 0 && id <= max) {
733 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
738 error = fn(id, (void *)p, data);
744 while (n < fls(id)) {
752 EXPORT_SYMBOL(idr_for_each);
755 * idr_get_next - lookup the next object at or above the given id
757 * @nextidp: pointer to the lookup key
759 * Returns a pointer to the first registered object whose id is greater than
760 * or equal to *@nextidp. On success, *@nextidp is updated to the id that was found, ready for the next iteration.
763 * This function can be called under rcu_read_lock(), given that the leaf
764 * pointers' lifetimes are correctly managed.
766 void *idr_get_next(struct idr *idp, int *nextidp)
768 struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
769 struct idr_layer **paa = &pa[0];
774 p = *paa = rcu_dereference_raw(idp->top);
777 n = (p->layer + 1) * IDR_BITS;
778 max = idr_max(p->layer + 1);
780 while (id >= 0 && id <= max) {
784 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
794 * Proceed to the next layer at the current level. Unlike
795 * idr_for_each(), @id isn't guaranteed to be aligned to
796 * layer boundary at this point and adding 1 << n may
797 * incorrectly skip IDs. Make sure we jump to the
798 * beginning of the next layer using round_up().
800 id = round_up(id + 1, 1 << n);
801 while (n < fls(id)) {
808 EXPORT_SYMBOL(idr_get_next);
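/*
 * Illustrative iteration sketch ("foo_idr" and do_something() are
 * hypothetical); this mirrors the pattern the idr_for_each_entry() helper in
 * <linux/idr.h> is built on:
 *
 *	int id = 0;
 *	struct foo *p;
 *
 *	while ((p = idr_get_next(&foo_idr, &id)) != NULL) {
 *		do_something(p);
 *		id++;
 *	}
 */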
812 * idr_replace - replace pointer for given id
814 * @ptr: pointer you want associated with the id
817 * Replace the pointer registered with an id and return the old value.
818 * A %-ENOENT return indicates that @id was not found.
819 * A %-EINVAL return indicates that @id was not within valid constraints.
821 * The caller must serialize with writers.
823 void *idr_replace(struct idr *idp, void *ptr, int id)
826 struct idr_layer *p, *old_p;
829 return ERR_PTR(-EINVAL);
833 return ERR_PTR(-ENOENT);
835 if (id > idr_max(p->layer + 1))
836 return ERR_PTR(-ENOENT);
838 n = p->layer * IDR_BITS;
839 while ((n > 0) && p) {
840 p = p->ary[(id >> n) & IDR_MASK];
845 if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
846 return ERR_PTR(-ENOENT);
849 rcu_assign_pointer(p->ary[n], ptr);
853 EXPORT_SYMBOL(idr_replace);
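/*
 * Illustrative sketch: idr_replace() returns the old pointer or an ERR_PTR()
 * value, so callers typically check with IS_ERR().  "foo_idr", "new_foo" and
 * "old" are hypothetical:
 *
 *	old = idr_replace(&foo_idr, new_foo, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */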
855 void __init idr_init_cache(void)
857 idr_layer_cache = kmem_cache_create("idr_layer_cache",
858 sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
862 * idr_init - initialize idr handle
865 * This function is used to set up the handle (@idp) that you will pass
866 * to the rest of the functions.
868 void idr_init(struct idr *idp)
870 memset(idp, 0, sizeof(struct idr));
871 spin_lock_init(&idp->lock);
873 EXPORT_SYMBOL(idr_init);
875 static int idr_has_entry(int id, void *p, void *data)
880 bool idr_is_empty(struct idr *idp)
882 return !idr_for_each(idp, idr_has_entry, NULL);
884 EXPORT_SYMBOL(idr_is_empty);
887 * DOC: IDA description
888 * IDA - IDR based ID allocator
890 * This is an id allocator without id -> pointer translation. Memory
891 * usage is much lower than with a full-blown idr because each id only
892 * occupies a bit. ida uses a custom leaf node which contains
893 * IDA_BITMAP_BITS slots.
895 * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
898 static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
902 if (!ida->free_bitmap) {
903 spin_lock_irqsave(&ida->idr.lock, flags);
904 if (!ida->free_bitmap) {
905 ida->free_bitmap = bitmap;
908 spin_unlock_irqrestore(&ida->idr.lock, flags);
915 * ida_pre_get - reserve resources for ida allocation
917 * @gfp_mask: memory allocation flag
919 * This function should be called prior to locking and calling the
920 * following function. It preallocates enough memory to satisfy the
921 * worst possible allocation.
923 * If the system is REALLY out of memory this function returns %0, otherwise %1.
926 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
928 /* allocate idr_layers */
929 if (!__idr_pre_get(&ida->idr, gfp_mask))
932 /* allocate free_bitmap */
933 if (!ida->free_bitmap) {
934 struct ida_bitmap *bitmap;
936 bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
940 free_bitmap(ida, bitmap);
945 EXPORT_SYMBOL(ida_pre_get);
948 * ida_get_new_above - allocate new ID above or equal to a start id
950 * @starting_id: id to start search at
951 * @p_id: pointer to the allocated handle
953 * Allocate a new ID above or equal to @starting_id. It should be called
954 * with any required locks held.
956 * If memory is required, it will return %-EAGAIN; you should unlock
957 * and go back to the ida_pre_get() call. If the ida is full, it will return %-ENOSPC.
960 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
962 int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
964 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
965 struct ida_bitmap *bitmap;
967 int idr_id = starting_id / IDA_BITMAP_BITS;
968 int offset = starting_id % IDA_BITMAP_BITS;
972 /* get vacant slot */
973 t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
975 return t == -ENOMEM ? -EAGAIN : t;
977 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
984 /* if bitmap isn't there, create a new one */
985 bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
987 spin_lock_irqsave(&ida->idr.lock, flags);
988 bitmap = ida->free_bitmap;
989 ida->free_bitmap = NULL;
990 spin_unlock_irqrestore(&ida->idr.lock, flags);
995 memset(bitmap, 0, sizeof(struct ida_bitmap));
996 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
1001 /* lookup for empty slot */
1002 t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
1003 if (t == IDA_BITMAP_BITS) {
1004 /* no empty slot after offset, continue to the next chunk */
1010 id = idr_id * IDA_BITMAP_BITS + t;
1011 if (id >= MAX_IDR_BIT)
1014 __set_bit(t, bitmap->bitmap);
1015 if (++bitmap->nr_busy == IDA_BITMAP_BITS)
1016 idr_mark_full(pa, idr_id);
1020 /* Each leaf node can handle nearly a thousand slots and the
1021 * whole idea of ida is to have a small memory footprint.
1022 * Throw away extra resources one by one after each successful allocation.
1025 if (ida->idr.id_free_cnt || ida->free_bitmap) {
1026 struct idr_layer *p = get_from_free_list(&ida->idr);
1028 kmem_cache_free(idr_layer_cache, p);
1033 EXPORT_SYMBOL(ida_get_new_above);
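/*
 * Illustrative sketch of the ida_pre_get()/ida_get_new_above() retry loop
 * described above; "foo_ida" and "foo_lock" are hypothetical:
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&foo_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&foo_lock);
 *		ret = ida_get_new_above(&foo_ida, 0, &id);
 *		spin_unlock(&foo_lock);
 *	} while (ret == -EAGAIN);
 *
 *	return ret ? ret : id;
 */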
1036 * ida_remove - remove the given ID
1040 void ida_remove(struct ida *ida, int id)
1042 struct idr_layer *p = ida->idr.top;
1043 int shift = (ida->idr.layers - 1) * IDR_BITS;
1044 int idr_id = id / IDA_BITMAP_BITS;
1045 int offset = id % IDA_BITMAP_BITS;
1047 struct ida_bitmap *bitmap;
1049 if (idr_id > idr_max(ida->idr.layers))
1052 /* clear full bits while looking up the leaf idr_layer */
1053 while ((shift > 0) && p) {
1054 n = (idr_id >> shift) & IDR_MASK;
1055 __clear_bit(n, p->bitmap);
1063 n = idr_id & IDR_MASK;
1064 __clear_bit(n, p->bitmap);
1066 bitmap = (void *)p->ary[n];
1067 if (!bitmap || !test_bit(offset, bitmap->bitmap))
1070 /* update bitmap and remove it if empty */
1071 __clear_bit(offset, bitmap->bitmap);
1072 if (--bitmap->nr_busy == 0) {
1073 __set_bit(n, p->bitmap); /* to please idr_remove() */
1074 idr_remove(&ida->idr, idr_id);
1075 free_bitmap(ida, bitmap);
1081 WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1083 EXPORT_SYMBOL(ida_remove);
1086 * ida_destroy - release all cached layers within an ida tree
1089 void ida_destroy(struct ida *ida)
1091 idr_destroy(&ida->idr);
1092 kfree(ida->free_bitmap);
1094 EXPORT_SYMBOL(ida_destroy);
1097 * ida_simple_get - get a new id.
1098 * @ida: the (initialized) ida.
1099 * @start: the minimum id (inclusive, < 0x80000000)
1100 * @end: the maximum id (exclusive, < 0x80000000 or 0 for no limit)
1101 * @gfp_mask: memory allocation flags
1103 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
1104 * On memory allocation failure, returns -ENOMEM.
1106 * Use ida_simple_remove() to get rid of an id.
1108 int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
1113 unsigned long flags;
1115 BUG_ON((int)start < 0);
1116 BUG_ON((int)end < 0);
1121 BUG_ON(end < start);
1126 if (!ida_pre_get(ida, gfp_mask))
1129 spin_lock_irqsave(&simple_ida_lock, flags);
1130 ret = ida_get_new_above(ida, start, &id);
1133 ida_remove(ida, id);
1139 spin_unlock_irqrestore(&simple_ida_lock, flags);
1141 if (unlikely(ret == -EAGAIN))
1146 EXPORT_SYMBOL(ida_simple_get);
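/*
 * Illustrative usage sketch; "foo_ida" is hypothetical.  The ida_simple_*()
 * helpers do their own locking, so no caller-side serialization is needed:
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	int nr = ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_simple_remove(&foo_ida, nr);
 */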
1149 * ida_simple_remove - remove an allocated id.
1150 * @ida: the (initialized) ida.
1151 * @id: the id returned by ida_simple_get.
1153 void ida_simple_remove(struct ida *ida, unsigned int id)
1155 unsigned long flags;
1157 BUG_ON((int)id < 0);
1158 spin_lock_irqsave(&simple_ida_lock, flags);
1159 ida_remove(ida, id);
1160 spin_unlock_irqrestore(&simple_ida_lock, flags);
1162 EXPORT_SYMBOL(ida_simple_remove);
1165 * ida_init - initialize ida handle
1168 * This function is used to set up the handle (@ida) that you will pass
1169 * to the rest of the functions.
1171 void ida_init(struct ida *ida)
1173 memset(ida, 0, sizeof(struct ida));
1174 idr_init(&ida->idr);
1177 EXPORT_SYMBOL(ida_init);