Code Review
/
kvmfornfv.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
[kvmfornfv.git]
/
kernel
/
mm
/
cma.c
diff --git
a/kernel/mm/cma.c
b/kernel/mm/cma.c
index
3a7a67b
..
ea506eb
100644
(file)
--- a/
kernel/mm/cma.c
+++ b/
kernel/mm/cma.c
@@
-182,7
+182,7
@@
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
if (!size || !memblock_is_region_reserved(base, size))
return -EINVAL;
if (!size || !memblock_is_region_reserved(base, size))
return -EINVAL;
- /* ensure minimal alignment requied by mm core */
+ /* ensure minimal alignment required by mm core */
alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
/* alignment should be aligned with order_per_bit */
alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
/* alignment should be aligned with order_per_bit */
@@
-238,7
+238,7
@@
int __init cma_declare_contiguous(phys_addr_t base,
/*
* high_memory isn't direct mapped memory so retrieving its physical
* address isn't appropriate. But it would be useful to check the
/*
* high_memory isn't direct mapped memory so retrieving its physical
* address isn't appropriate. But it would be useful to check the
- * physical address of the highmem boundary so it's justfiable to get
+ * physical address of the highmem boundary so it's justifiable to get
* the physical address from it. On x86 there is a validation check for
* this case, so the following workaround is needed to avoid it.
*/
* the physical address from it. On x86 there is a validation check for
* this case, so the following workaround is needed to avoid it.
*/
@@
-316,13
+316,15
@@
int __init cma_declare_contiguous(phys_addr_t base,
*/
if (base < highmem_start && limit > highmem_start) {
addr = memblock_alloc_range(size, alignment,
*/
if (base < highmem_start && limit > highmem_start) {
addr = memblock_alloc_range(size, alignment,
- highmem_start, limit);
+ highmem_start, limit,
+ MEMBLOCK_NONE);
limit = highmem_start;
}
if (!addr) {
addr = memblock_alloc_range(size, alignment, base,
limit = highmem_start;
}
if (!addr) {
addr = memblock_alloc_range(size, alignment, base,
- limit);
+ limit,
+ MEMBLOCK_NONE);
if (!addr) {
ret = -ENOMEM;
goto err;
if (!addr) {
ret = -ENOMEM;
goto err;
@@
-359,9
+361,11
@@
err:
* This function allocates part of contiguous memory on specific
* contiguous memory area.
*/
* This function allocates part of contiguous memory on specific
* contiguous memory area.
*/
-struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
+struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
{
- unsigned long mask, offset, pfn, start = 0;
+ unsigned long mask, offset;
+ unsigned long pfn = -1;
+ unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
struct page *page = NULL;
int ret;
@@
-369,7
+373,7
@@
struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
if (!cma || !cma->count)
return NULL;
if (!cma || !cma->count)
return NULL;
- pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
count, align);
if (!count)
count, align);
if (!count)
@@
-416,7
+420,7
@@
struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
start = bitmap_no + mask + 1;
}
start = bitmap_no + mask + 1;
}
- trace_cma_alloc(page ? pfn : -1UL, page, count, align);
+ trace_cma_alloc(pfn, page, count, align);
pr_debug("%s(): returned %p\n", __func__, page);
return page;
pr_debug("%s(): returned %p\n", __func__, page);
return page;