Code Review
/
kvmfornfv.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git]
/
kernel
/
mm
/
dmapool.c
diff --git
a/kernel/mm/dmapool.c
b/kernel/mm/dmapool.c
index
fd5fe43
..
57312b5
100644
(file)
--- a/
kernel/mm/dmapool.c
+++ b/
kernel/mm/dmapool.c
@@
-242,7
+242,7
@@
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
return page;
}
return page;
}
-static inline
int
is_page_busy(struct dma_page *page)
+static inline
bool
is_page_busy(struct dma_page *page)
{
return page->in_use != 0;
}
{
return page->in_use != 0;
}
@@
-271,6
+271,9
@@
void dma_pool_destroy(struct dma_pool *pool)
{
bool empty = false;
{
bool empty = false;
+ if (unlikely(!pool))
+ return;
+
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock);
list_del(&pool->pools);
mutex_lock(&pools_reg_lock);
mutex_lock(&pools_lock);
list_del(&pool->pools);
@@
-323,7
+326,7
@@
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
size_t offset;
void *retval;
size_t offset;
void *retval;
- might_sleep_if(
mem_flags & __GFP_WAIT
);
+ might_sleep_if(
gfpflags_allow_blocking(mem_flags)
);
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(page, &pool->page_list, page_list) {
spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry(page, &pool->page_list, page_list) {
@@
-334,7
+337,7
@@
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
spin_unlock_irqrestore(&pool->lock, flags);
/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
spin_unlock_irqrestore(&pool->lock, flags);
- page = pool_alloc_page(pool, mem_flags);
+ page = pool_alloc_page(pool, mem_flags
& (~__GFP_ZERO)
);
if (!page)
return NULL;
if (!page)
return NULL;
@@
-372,9
+375,14
@@
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
break;
}
}
break;
}
}
- memset(retval, POOL_POISON_ALLOCATED, pool->size);
+ if (!(mem_flags & __GFP_ZERO))
+ memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
spin_unlock_irqrestore(&pool->lock, flags);
#endif
spin_unlock_irqrestore(&pool->lock, flags);
+
+ if (mem_flags & __GFP_ZERO)
+ memset(retval, 0, pool->size);
+
return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
@@
-386,7
+394,7
@@
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
list_for_each_entry(page, &pool->page_list, page_list) {
if (dma < page->dma)
continue;
list_for_each_entry(page, &pool->page_list, page_list) {
if (dma < page->dma)
continue;
- if (
dma < (page->dma + pool->allocation)
)
+ if (
(dma - page->dma) < pool->allocation
)
return page;
}
return NULL;
return page;
}
return NULL;