These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/arch/arm/mm/dma-mapping.c b/kernel/arch/arm/mm/dma-mapping.c
index 6e4b9ff..534a60a 100644
--- a/kernel/arch/arm/mm/dma-mapping.c
+++ b/kernel/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -148,11 +149,14 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
                                  dma_addr_t handle, struct dma_attrs *attrs);
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs);
 
 struct dma_map_ops arm_coherent_dma_ops = {
        .alloc                  = arm_coherent_dma_alloc,
        .free                   = arm_coherent_dma_free,
-       .mmap                   = arm_dma_mmap,
+       .mmap                   = arm_coherent_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_coherent_dma_map_page,
        .map_sg                 = arm_dma_map_sg,
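
This hunk gives arm_coherent_dma_ops a dedicated .mmap instead of reusing arm_dma_mmap, so coherent devices no longer have an uncached pgprot forced onto their userspace mappings (the new callback is defined further down in the patch). Either ops table's .mmap is reached through the generic dma_mmap_coherent() wrapper; below is a minimal sketch of a hypothetical driver mmap handler, where the mydev names and fields are assumptions for illustration:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; the buffer comes from dma_alloc_coherent(). */
struct mydev {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	/* Dispatches to get_dma_ops(dev)->mmap, i.e. the .mmap op patched above. */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->size);
}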
@@ -645,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        size = PAGE_ALIGN(size);
        want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-       if (is_coherent || nommu())
+       if (nommu())
+               addr = __alloc_simple_buffer(dev, size, gfp, &page);
+       else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
+               addr = __alloc_from_contiguous(dev, size, prot, &page,
+                                              caller, want_vaddr);
+       else if (is_coherent)
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
-       else if (!(gfp & __GFP_WAIT))
+       else if (!gfpflags_allow_blocking(gfp))
                addr = __alloc_from_pool(size, &page);
-       else if (!dev_get_cma_area(dev))
-               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
        else
-               addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+                                           caller, want_vaddr);
 
        if (page)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
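
Two things change in __dma_alloc(): the CMA branch now comes before the is_coherent short-circuit, so a coherent device with a per-device CMA area actually allocates from it instead of falling back to __alloc_simple_buffer(), and the open-coded __GFP_WAIT test becomes gfpflags_allow_blocking(). Around 4.4, __GFP_WAIT was split into the reclaim flags, and the replacement helper in <linux/gfp.h> reduces to a single bit test (reproduced from memory, so treat it as a paraphrase):

/* The caller may block (and hence take the CMA or remap paths above)
 * only if it allows direct reclaim. */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM);
}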
@@ -668,10 +676,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                    gfp_t gfp, struct dma_attrs *attrs)
 {
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
-       void *memory;
-
-       if (dma_alloc_from_coherent(dev, size, handle, &memory))
-               return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
                           attrs, __builtin_return_address(0));
@@ -680,20 +684,11 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-       pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
-       void *memory;
-
-       if (dma_alloc_from_coherent(dev, size, handle, &memory))
-               return memory;
-
-       return __dma_alloc(dev, size, handle, gfp, prot, true,
+       return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
                           attrs, __builtin_return_address(0));
 }
 
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 struct dma_attrs *attrs)
 {
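
Both allocators stop open-coding the dma_alloc_from_coherent() check, and arm_coherent_dma_alloc() now passes PAGE_KERNEL directly, since remapping attributes are pointless for a coherent mapping. Upstream, dropping these calls went hand in hand with moving the per-device coherent-area check into the generic dma_alloc_attrs() wrapper, so it runs once in common code before the architecture's .alloc op. The sketch below paraphrases that wrapper; it is an assumption about the tree this patch targets, not code from this file:

/* Paraphrase of dma_alloc_attrs() from the generic dma-mapping header. */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	/* The per-device coherent area is tried here, for every arch... */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* ...so arm_dma_alloc()/arm_coherent_dma_alloc() need not repeat it. */
	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}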
@@ -704,8 +699,6 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
        unsigned long off = vma->vm_pgoff;
 
-       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
@@ -720,6 +713,26 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs)
+{
+       return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+#endif /* CONFIG_MMU */
+       return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
 /*
  * Free a buffer as defined by the above mapping.
  */
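
The mmap path is split accordingly: both variants share __arm_dma_mmap(), but only arm_dma_mmap() rewrites vma->vm_page_prot, and only on CONFIG_MMU builds; arm_coherent_dma_mmap() keeps the caller's protection bits, because coherent hardware does not need an uncached or write-combined userspace view. For reference, the helper the non-coherent path applies, as defined earlier in this same file for MMU builds:

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}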
@@ -730,17 +743,14 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
        bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-       if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-               return;
-
        size = PAGE_ALIGN(size);
 
-       if (is_coherent || nommu()) {
+       if (nommu()) {
                __dma_free_buffer(page, size);
-       } else if (__free_from_pool(cpu_addr, size)) {
+       } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
                return;
        } else if (!dev_get_cma_area(dev)) {
-               if (want_vaddr)
+               if (want_vaddr && !is_coherent)
                        __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
@@ -1239,7 +1249,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
        struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t dma_addr, iova;
-       int i, ret = DMA_ERROR_CODE;
+       int i;
 
        dma_addr = __alloc_iova(mapping, size);
        if (dma_addr == DMA_ERROR_CODE)
@@ -1247,6 +1257,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 
        iova = dma_addr;
        for (i = 0; i < count; ) {
+               int ret;
+
                unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
                phys_addr_t phys = page_to_phys(pages[i]);
                unsigned int len, j;
@@ -1351,7 +1363,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
-       if (!(gfp & __GFP_WAIT))
+       if (!gfpflags_allow_blocking(gfp))
                return __iommu_alloc_atomic(dev, size, handle);
 
        /*
@@ -1395,12 +1407,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
 
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
        if (!pages)
                return -ENXIO;
 
+       if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+               return -ENXIO;
+
+       pages += off;
+
        do {
                int ret = vm_insert_page(vma, uaddr, *pages++);
                if (ret) {
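
The last hunk adds a bounds check to arm_iommu_mmap_attrs(): a request whose page offset or length runs past the allocated buffer is now rejected with -ENXIO before the vm_insert_page() loop can walk off the end of the pages[] array. Seen from userspace it looks like the sketch below, where the /dev/mydma node and the 16-page buffer size are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PAGE_SZ  4096
#define BUF_SIZE (16 * PAGE_SZ)	/* size the driver allocated */

int main(void)
{
	int fd = open("/dev/mydma", O_RDWR);
	if (fd < 0)
		return 1;

	/* The offset points one page past the end of the buffer, so a
	 * patched kernel fails this mmap() with ENXIO instead of inserting
	 * pages it never allocated. */
	void *p = mmap(NULL, PAGE_SZ, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, BUF_SIZE);
	if (p == MAP_FAILED)
		perror("mmap");

	close(fd);
	return 0;
}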