X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fdrivers%2Fstaging%2Fandroid%2Fion%2Fion.c;h=e237e9f3312d6b99e5d2eac4a07f534ce03690c2;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hp=b0b96ab31954a98718d1af05121656abc1533e17;hpb=f93b97fd65072de626c074dbe099a1fff05ce060;p=kvmfornfv.git
diff --git a/kernel/drivers/staging/android/ion/ion.c b/kernel/drivers/staging/android/ion/ion.c
index b0b96ab31..e237e9f33 100644
--- a/kernel/drivers/staging/android/ion/ion.c
+++ b/kernel/drivers/staging/android/ion/ion.c
@@ -1,5 +1,5 @@
 /*
-
+ *
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
@@ -213,10 +213,10 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 			"heap->ops->map_dma should return ERR_PTR on error"))
 		table = ERR_PTR(-EINVAL);
 	if (IS_ERR(table)) {
-		heap->ops->free(buffer);
-		kfree(buffer);
-		return ERR_CAST(table);
+		ret = -EINVAL;
+		goto err1;
 	}
+
 	buffer->sg_table = table;
 	if (ion_buffer_fault_user_mappings(buffer)) {
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
@@ -226,7 +226,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
 		if (!buffer->pages) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err;
 		}
 
 		for_each_sg(table->sgl, sg, table->nents, i) {
@@ -235,23 +235,22 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 			for (j = 0; j < sg->length / PAGE_SIZE; j++)
 				buffer->pages[k++] = page++;
 		}
-
-		if (ret)
-			goto err;
 	}
 
 	buffer->dev = dev;
 	buffer->size = len;
 	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
-	/* this will set up dma addresses for the sglist -- it is not
-	   technically correct as per the dma api -- a specific
-	   device isn't really taking ownership here. However, in practice on
-	   our systems the only dma_address space is physical addresses.
-	   Additionally, we can't afford the overhead of invalidating every
-	   allocation via dma_map_sg. The implicit contract here is that
-	   memory coming from the heaps is ready for dma, ie if it has a
-	   cached mapping that mapping has been invalidated */
+	/*
+	 * this will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the dma api -- a specific
+	 * device isn't really taking ownership here. However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg. The implicit contract here is that
+	 * memory coming from the heaps is ready for dma, ie if it has a
+	 * cached mapping that mapping has been invalidated
+	 */
 	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
 		sg_dma_address(sg) = sg_phys(sg);
 	mutex_lock(&dev->buffer_lock);
@@ -261,9 +260,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 err:
 	heap->ops->unmap_dma(heap, buffer);
-	heap->ops->free(buffer);
 err1:
-	vfree(buffer->pages);
+	heap->ops->free(buffer);
 err2:
 	kfree(buffer);
 	return ERR_PTR(ret);
@@ -753,8 +751,10 @@ struct ion_client *ion_client_create(struct ion_device *dev,
 	get_task_struct(current->group_leader);
 	task_lock(current->group_leader);
 	pid = task_pid_nr(current->group_leader);
-	/* don't bother to store task struct for kernel threads,
-	   they can't be killed anyway */
+	/*
+	 * don't bother to store task struct for kernel threads,
+	 * they can't be killed anyway
+	 */
 	if (current->group_leader->flags & PF_KTHREAD) {
 		put_task_struct(current->group_leader);
 		task = NULL;
@@ -997,7 +997,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
 	mutex_unlock(&buffer->lock);
 }
 
-static struct vm_operations_struct ion_vma_ops = {
+static const struct vm_operations_struct ion_vma_ops = {
 	.open = ion_vm_open,
 	.close = ion_vm_close,
 	.fault = ion_vm_fault,
@@ -1103,10 +1103,10 @@ static struct dma_buf_ops dma_buf_ops = {
 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
 						struct ion_handle *handle)
 {
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct ion_buffer *buffer;
 	struct dma_buf *dmabuf;
 	bool valid_handle;
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	mutex_lock(&client->lock);
 	valid_handle = ion_handle_validate(client, handle);
@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
 		mutex_unlock(&client->lock);
 		goto end;
 	}
-	mutex_unlock(&client->lock);
 
 	handle = ion_handle_create(client, buffer);
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		mutex_unlock(&client->lock);
 		goto end;
+	}
 
-	mutex_lock(&client->lock);
 	ret = ion_handle_add(client, handle);
 	mutex_unlock(&client->lock);
 	if (ret) {
@@ -1466,7 +1466,6 @@ static const struct file_operations debug_heap_fops = {
 	.release = single_release,
 };
 
-#ifdef DEBUG_HEAP_SHRINKER
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1474,15 +1473,14 @@ static int debug_shrink_set(void *data, u64 val)
 	int objs;
 
 	sc.gfp_mask = -1;
-	sc.nr_to_scan = 0;
-
-	if (!val)
-		return 0;
+	sc.nr_to_scan = val;
 
-	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-	sc.nr_to_scan = objs;
+	if (!val) {
+		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
+		sc.nr_to_scan = objs;
+	}
 
-	heap->shrinker.shrink(&heap->shrinker, &sc);
+	heap->shrinker.scan_objects(&heap->shrinker, &sc);
 	return 0;
 }
 
@@ -1495,14 +1493,13 @@ static int debug_shrink_get(void *data, u64 *val)
 
 	sc.gfp_mask = -1;
 	sc.nr_to_scan = 0;
 
-	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
 	*val = objs;
 	return 0;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
-#endif
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
@@ -1524,8 +1521,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 
 	heap->dev = dev;
 	down_write(&dev->lock);
-	/* use negative heap->id to reverse the priority -- when traversing
-	   the list later attempt higher id numbers first */
+	/*
+	 * use negative heap->id to reverse the priority -- when traversing
+	 * the list later attempt higher id numbers first
+	 */
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 	debug_file = debugfs_create_file(heap->name, 0664,
@@ -1540,8 +1539,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 			path, heap->name);
 	}
 
-#ifdef DEBUG_HEAP_SHRINKER
-	if (heap->shrinker.shrink) {
+	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		char debug_name[64];
 
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
@@ -1556,9 +1554,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 				path, debug_name);
 		}
 	}
-#endif
+
 	up_write(&dev->lock);
 }
+EXPORT_SYMBOL(ion_device_add_heap);
 
 struct ion_device *ion_device_create(long (*custom_ioctl)
 				     (struct ion_client *client,
@@ -1579,6 +1578,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
 	ret = misc_register(&idev->dev);
 	if (ret) {
 		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
 		return ERR_PTR(ret);
 	}
 
@@ -1607,6 +1607,7 @@ debugfs_done:
 	idev->clients = RB_ROOT;
 	return idev;
 }
+EXPORT_SYMBOL(ion_device_create);
 
 void ion_device_destroy(struct ion_device *dev)
 {
@@ -1615,6 +1616,7 @@ void ion_device_destroy(struct ion_device *dev)
 	/* XXX need to free the heaps and clients ? */
 	kfree(dev);
 }
+EXPORT_SYMBOL(ion_device_destroy);
 
 void __init ion_reserve(struct ion_platform_data *data)
 {
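
The relabelled unwind in ion_buffer_create() above is the usual kernel goto-ladder: resources are taken in order, and the error labels sit in strictly reverse order so each failure point jumps to a label that releases only what was actually acquired. err unmaps the DMA mapping, then falls through err1 (returns the heap allocation) and err2 (frees the struct); a vmalloc() failure now unwinds through err, since the DMA mapping already exists at that point, and a map_dma() failure goes to err1 instead of open-coding free() plus kfree(). A generic sketch of the idiom, assuming hypothetical widget_*() helpers in place of the heap ops:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>

struct widget {
	void *backing;
};

/* hypothetical helper standing in for heap->ops->allocate() */
static int widget_alloc_backing(struct widget *w)
{
	w->backing = kmalloc(64, GFP_KERNEL);
	return w->backing ? 0 : -ENOMEM;
}

static void widget_free_backing(struct widget *w)
{
	kfree(w->backing);
}

/* hypothetical helper standing in for heap->ops->map_dma() */
static int widget_map_dma(struct widget *w)
{
	return 0;	/* a real mapping step could fail, e.g. with -EINVAL */
}

static struct widget *widget_create(void)
{
	struct widget *w;
	int ret;

	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);

	ret = widget_alloc_backing(w);
	if (ret)
		goto err_free_widget;

	ret = widget_map_dma(w);
	if (ret)
		goto err_free_backing;

	return w;

	/* labels in strictly reverse order of acquisition; each falls through */
err_free_backing:
	widget_free_backing(w);
err_free_widget:
	kfree(w);
	return ERR_PTR(ret);
}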
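
The ion_import_dma_buf() hunk has a similar cleanup shape: instead of dropping client->lock after the lookup and retaking it around ion_handle_add(), the lock is now held from the lookup through handle creation and insertion. That closes the window in which two concurrent imports of the same dma-buf could each miss the other's handle and insert duplicates into the client's handle tree; the new IS_ERR(handle) branch still releases the lock before bailing out through the end label.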
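
The debug_shrink_set()/debug_shrink_get() changes track the kernel's split shrinker API, in which the old single ->shrink() callback became ->count_objects()/->scan_objects(). That split is also what lets the DEBUG_HEAP_SHRINKER #ifdef go away: ion_device_add_heap() can simply test that both callbacks are populated before creating the "%s_shrink" debugfs file. A minimal sketch of a shrinker written against that API; demo_pool, demo_count() and demo_scan() are hypothetical names for illustration, not ion code:

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>

/* hypothetical count of freeable objects sitting in a pool */
static atomic_long_t demo_pool = ATOMIC_LONG_INIT(0);

/* ->count_objects: report how many objects could be freed right now */
static unsigned long demo_count(struct shrinker *shrinker,
				struct shrink_control *sc)
{
	return atomic_long_read(&demo_pool);
}

/* ->scan_objects: free up to sc->nr_to_scan objects, return how many went */
static unsigned long demo_scan(struct shrinker *shrinker,
			       struct shrink_control *sc)
{
	unsigned long freed = min_t(unsigned long,
				    atomic_long_read(&demo_pool),
				    sc->nr_to_scan);

	atomic_long_sub(freed, &demo_pool);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};
/* would be registered from module init via register_shrinker(&demo_shrinker) */

With this shape, writing N to the debugfs file maps to sc.nr_to_scan = N followed by one scan_objects() call, writing 0 first sizes a full drain via count_objects(), and reading the file reports count_objects() alone, which is exactly what the reworked debug_shrink_set()/debug_shrink_get() pair does.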