/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>
#include <linux/delay.h>

/* Local RDS headers (assumed; this file relies on the rds_ib_* declarations). */
#include "rds.h"
#include "ib.h"
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
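/*
 * Added note: clean_list_grace/CLEAN_LIST_BUSY_BIT implement a small per-CPU
 * "grace period" around llist_del_first() on pool->clean_list (see
 * rds_ib_reuse_fmr() and wait_clean_list_grace() below).  A CPU sets its bit
 * while it pops from the clean list; the flush path spins until every CPU's
 * bit is clear before it splices unmapped MRs back onto that list, so a node
 * can never be removed and re-added while another CPU is still looking at it.
 */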
/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct llist_node	llnode;

	/* unmap_list is for freeing */
	struct list_head	unmap_list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	int			sg_dma_len;
};
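/*
 * Added note: an rds_ib_mr is handed to the RDS core as the opaque
 * trans_private cookie -- rds_ib_get_mr() returns it, and rds_ib_free_mr()
 * and rds_ib_sync_mr() below receive it back as their trans_private argument.
 */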
/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	unsigned int		pool_type;
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct delayed_work	flush_worker;	/* flush worker */

	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # of dirty MRs */

	struct llist_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct llist_head	free_list;	/* unused MRs */
	struct llist_head	clean_list;	/* global unused & unmapped MRs */
	wait_queue_head_t	flush_wait;

	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};
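/*
 * Added note on MR movement through the pool: rds_ib_free_mr() pushes a
 * just-released MR onto free_list (or drop_list once remap_count hits
 * fmr_attr.max_maps).  rds_ib_flush_mr_pool() drains both lists, unmaps the
 * FMRs, and splices the survivors onto clean_list, from which
 * rds_ib_reuse_fmr() hands them out again without talking to the HCA.
 */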
static struct workqueue_struct *rds_ib_fmr_wq;

int rds_ib_fmr_init(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}
void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_FMR_1M_MSG_SIZE + 1;
		pool->max_items = RDS_FMR_1M_POOL_SIZE;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_FMR_8K_MSG_SIZE + 1;
		pool->max_items = RDS_FMR_8K_POOL_SIZE;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;

	return pool;
}
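/*
 * Added sizing note (illustrative, not from the original source): with the
 * settings above, max_free_pinned is a quarter of the pages the whole pool
 * could map at once -- e.g. a hypothetical pool of 1024 MRs with
 * max_pages == 3 allows roughly 768 pinned pages behind free MRs before
 * rds_ib_free_mr() starts requesting flushes.  max_items is the hard
 * per-pool MR ceiling enforced in rds_ib_alloc_fmr(); max_items_soft is a
 * softer bound derived from the device's max_fmrs.
 */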
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}
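/*
 * Added note: the busy-wait above is bounded because rds_ib_reuse_fmr()
 * disables preemption for the window in which CLEAN_LIST_BUSY_BIT is set,
 * so the owner of the bit cannot be scheduled out while we spin on it.
 */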
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev,
					  int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (npages <= RDS_FMR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of the pools is reaching its upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the
		 * driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it.
		 */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
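/*
 * Added summary of the allocation strategy above (restating the code, not
 * new behaviour): first try to reuse a clean MR, then allocate a new one if
 * the pool is under max_items, otherwise flush dirty MRs and retry; after a
 * couple of failed iterations the caller gets -EAGAIN and the relevant
 * "pool_depleted" statistic is bumped.
 */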
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	/* An FMR maps a run of whole pages, so only the first segment may
	 * begin off a page boundary and only the last may end off one;
	 * each such partial page costs one extra entry in dma_pages. */
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);
	return ret;
}
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}
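/*
 * Added note: both branches sync with DMA_BIDIRECTIONAL because that is the
 * direction the scatterlist was mapped with in rds_ib_map_fmr(); the
 * "direction" argument only selects whether the CPU or the device is about
 * to touch the pages.
 */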
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}
/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}
/*
 * this takes a list head of mrs and turns it into a single chain of llist
 * nodes (head and tail are returned) so the whole batch of reusable MRs can
 * be spliced back onto the clean list with one llist_add_batch() call.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}
/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
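/*
 * Added overview of the flow below (restating the code): if the caller asks
 * for an MR back (ibmr_ret != NULL) and another flush is already running, we
 * first try to grab a clean MR instead of blocking on flush_lock.  Once the
 * lock is held, drop_list and free_list (and clean_list too when free_all is
 * set) are drained, the FMRs are unmapped in one ib_unmap_fmr() call, MRs
 * over the free goal or out of remaps are destroyed, and the rest are
 * spliced back onto clean_list.
 */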
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
				int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
	int ret = 0;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);

		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal ||
		    ibmr->remap_count >= pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off. The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace period
		 * here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}
	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr)) {
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}