These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/drivers/block/null_blk.c b/kernel/drivers/block/null_blk.c
index 65cd61a..09e3c0d 100644
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blk-mq.h>
 #include <linux/hrtimer.h>
+#include <linux/lightnvm.h>
 
 struct nullb_cmd {
        struct list_head list;
@@ -17,6 +18,7 @@ struct nullb_cmd {
        struct bio *bio;
        unsigned int tag;
        struct nullb_queue *nq;
+       struct hrtimer timer;
 };
 
 struct nullb_queue {
@@ -39,23 +41,14 @@ struct nullb {
 
        struct nullb_queue *queues;
        unsigned int nr_queues;
+       char disk_name[DISK_NAME_LEN];
 };
 
 static LIST_HEAD(nullb_list);
 static struct mutex lock;
 static int null_major;
 static int nullb_indexes;
-
-struct completion_queue {
-       struct llist_head list;
-       struct hrtimer timer;
-};
-
-/*
- * These are per-cpu for now, they will need to be configured by the
- * complete_queues parameter and appropriately mapped.
- */
-static DEFINE_PER_CPU(struct completion_queue, completion_queues);
+static struct kmem_cache *ppa_cache;
 
 enum {
        NULL_IRQ_NONE           = 0,
@@ -99,7 +92,7 @@ static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
        return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
 }
 
-static struct kernel_param_ops null_queue_mode_param_ops = {
+static const struct kernel_param_ops null_queue_mode_param_ops = {
        .set    = null_set_queue_mode,
        .get    = param_get_int,
 };
@@ -119,6 +112,10 @@ static int nr_devices = 2;
 module_param(nr_devices, int, S_IRUGO);
 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
 
+static bool use_lightnvm;
+module_param(use_lightnvm, bool, S_IRUGO);
+MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
+
 static int irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -127,7 +124,7 @@ static int null_set_irqmode(const char *str, const struct kernel_param *kp)
                                        NULL_IRQ_TIMER);
 }
 
-static struct kernel_param_ops null_irqmode_param_ops = {
+static const struct kernel_param_ops null_irqmode_param_ops = {
        .set    = null_set_irqmode,
        .get    = param_get_int,
 };
@@ -135,8 +132,8 @@ static struct kernel_param_ops null_irqmode_param_ops = {
 device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
 
-static int completion_nsec = 10000;
-module_param(completion_nsec, int, S_IRUGO);
+static unsigned long completion_nsec = 10000;
+module_param(completion_nsec, ulong, S_IRUGO);
 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
 
 static int hw_queue_depth = 64;
@@ -173,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd)
        put_tag(cmd->nq, cmd->tag);
 }
 
+static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
+
 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
 {
        struct nullb_cmd *cmd;
@@ -183,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
                cmd = &nq->cmds[tag];
                cmd->tag = tag;
                cmd->nq = nq;
+               if (irqmode == NULL_IRQ_TIMER) {
+                       hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
+                                    HRTIMER_MODE_REL);
+                       cmd->timer.function = null_cmd_timer_expired;
+               }
                return cmd;
        }
 
@@ -213,6 +217,11 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
 
 static void end_cmd(struct nullb_cmd *cmd)
 {
+       struct request_queue *q = NULL;
+
+       if (cmd->rq)
+               q = cmd->rq->q;
+
        switch (queue_mode)  {
        case NULL_Q_MQ:
                blk_mq_end_request(cmd->rq, 0);
@@ -222,45 +231,34 @@ static void end_cmd(struct nullb_cmd *cmd)
                blk_end_request_all(cmd->rq, 0);
                break;
        case NULL_Q_BIO:
-               bio_endio(cmd->bio, 0);
+               bio_endio(cmd->bio);
                break;
        }
 
        free_cmd(cmd);
+
+       /* Restart queue if needed, as we are freeing a tag */
+       if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(q->queue_lock, flags);
+               blk_start_queue_async(q);
+               spin_unlock_irqrestore(q->queue_lock, flags);
+       }
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 {
-       struct completion_queue *cq;
-       struct llist_node *entry;
-       struct nullb_cmd *cmd;
-
-       cq = &per_cpu(completion_queues, smp_processor_id());
-
-       while ((entry = llist_del_all(&cq->list)) != NULL) {
-               entry = llist_reverse_order(entry);
-               do {
-                       cmd = container_of(entry, struct nullb_cmd, ll_list);
-                       entry = entry->next;
-                       end_cmd(cmd);
-               } while (entry);
-       }
+       end_cmd(container_of(timer, struct nullb_cmd, timer));
 
        return HRTIMER_NORESTART;
 }
 
 static void null_cmd_end_timer(struct nullb_cmd *cmd)
 {
-       struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());
-
-       cmd->ll_list.next = NULL;
-       if (llist_add(&cmd->ll_list, &cq->list)) {
-               ktime_t kt = ktime_set(0, completion_nsec);
+       ktime_t kt = ktime_set(0, completion_nsec);
 
-               hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
-       }
-
-       put_cpu();
+       hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
 }
 
 static void null_softirq_done_fn(struct request *rq)
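
The two hunks above replace the shared per-CPU completion_queues with an
hrtimer embedded in each command: the expiry callback recovers its command
via container_of() and completes it directly, so no cross-CPU llist
draining remains. A minimal sketch of the pattern (my_cmd, my_complete()
and MY_DELAY_NS are hypothetical names, not part of this patch):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define MY_DELAY_NS 10000       /* mirrors completion_nsec's default */

    struct my_cmd {
            struct hrtimer timer;   /* one timer per in-flight command */
    };

    static void my_complete(struct my_cmd *cmd)
    {
            /* finish the command; no shared list, no get_cpu() section */
    }

    static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
    {
            /* recover the enclosing command from the embedded timer */
            my_complete(container_of(t, struct my_cmd, timer));
            return HRTIMER_NORESTART;
    }

    static void my_cmd_arm(struct my_cmd *cmd)
    {
            hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            cmd->timer.function = my_timer_fn;
            hrtimer_start(&cmd->timer, ktime_set(0, MY_DELAY_NS),
                          HRTIMER_MODE_REL);
    }

One timer per command costs a little memory, but it removes the
get_cpu()/put_cpu() critical section, which is the part that sits badly
with PREEMPT_RT.
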
@@ -278,7 +276,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
@@ -310,7 +308,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
        return &nullb->queues[index];
 }
 
-static void null_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 {
        struct nullb *nullb = q->queuedata;
        struct nullb_queue *nq = nullb_to_queue(nullb);
@@ -320,6 +318,7 @@ static void null_queue_bio(struct request_queue *q, struct bio *bio)
        cmd->bio = bio;
 
        null_handle_cmd(cmd);
+       return BLK_QC_T_NONE;
 }
 
 static int null_rq_prep_fn(struct request_queue *q, struct request *req)
@@ -334,6 +333,7 @@ static int null_rq_prep_fn(struct request_queue *q, struct request *req)
                req->special = cmd;
                return BLKPREP_OK;
        }
+       blk_stop_queue(q);
 
        return BLKPREP_DEFER;
 }
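
This BLKPREP_DEFER hunk pairs with the end_cmd() change earlier in the
patch: when the command pool is exhausted the queue is stopped, and
end_cmd() restarts it as soon as a tag is freed; without that restart, a
stopped queue with no completions left in flight would hang forever.
blk_stop_queue() needs no extra locking here since prep_fn runs with
q->queue_lock held. The restart side, reduced to a sketch
(restart_if_stopped() is a hypothetical name; end_cmd() open-codes it):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static void restart_if_stopped(struct request_queue *q)
    {
            unsigned long flags;

            if (!blk_queue_stopped(q))
                    return;

            spin_lock_irqsave(q->queue_lock, flags);
            /* the _async variant kicks the queue via kblockd, so it is
             * safe to call from completion (softirq/timer) context */
            blk_start_queue_async(q);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }
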
@@ -356,6 +356,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
 {
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
 
+       if (irqmode == NULL_IRQ_TIMER) {
+               hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+               cmd->timer.function = null_cmd_timer_expired;
+       }
        cmd->rq = bd->rq;
        cmd->nq = hctx->driver_data;
 
@@ -394,18 +398,177 @@ static struct blk_mq_ops null_mq_ops = {
        .complete       = null_softirq_done_fn,
 };
 
+static void cleanup_queue(struct nullb_queue *nq)
+{
+       kfree(nq->tag_map);
+       kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+       int i;
+
+       for (i = 0; i < nullb->nr_queues; i++)
+               cleanup_queue(&nullb->queues[i]);
+
+       kfree(nullb->queues);
+}
+
 static void null_del_dev(struct nullb *nullb)
 {
        list_del_init(&nullb->list);
 
-       del_gendisk(nullb->disk);
+       if (use_lightnvm)
+               nvm_unregister(nullb->disk_name);
+       else
+               del_gendisk(nullb->disk);
        blk_cleanup_queue(nullb->q);
        if (queue_mode == NULL_Q_MQ)
                blk_mq_free_tag_set(&nullb->tag_set);
-       put_disk(nullb->disk);
+       if (!use_lightnvm)
+               put_disk(nullb->disk);
+       cleanup_queues(nullb);
        kfree(nullb);
 }
 
+#ifdef CONFIG_NVM
+
+static void null_lnvm_end_io(struct request *rq, int error)
+{
+       struct nvm_rq *rqd = rq->end_io_data;
+       struct nvm_dev *dev = rqd->dev;
+
+       dev->mt->end_io(rqd, error);
+
+       blk_put_request(rq);
+}
+
+static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+       struct request_queue *q = dev->q;
+       struct request *rq;
+       struct bio *bio = rqd->bio;
+
+       rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
+       if (IS_ERR(rq))
+               return -ENOMEM;
+
+       rq->cmd_type = REQ_TYPE_DRV_PRIV;
+       rq->__sector = bio->bi_iter.bi_sector;
+       rq->ioprio = bio_prio(bio);
+
+       if (bio_has_data(bio))
+               rq->nr_phys_segments = bio_phys_segments(q, bio);
+
+       rq->__data_len = bio->bi_iter.bi_size;
+       rq->bio = rq->biotail = bio;
+
+       rq->end_io_data = rqd;
+
+       blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);
+
+       return 0;
+}
+
+static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
+{
+       sector_t size = gb * 1024 * 1024 * 1024ULL;
+       sector_t blksize;
+       struct nvm_id_group *grp;
+
+       id->ver_id = 0x1;
+       id->vmnt = 0;
+       id->cgrps = 1;
+       id->cap = 0x3;
+       id->dom = 0x1;
+
+       id->ppaf.blk_offset = 0;
+       id->ppaf.blk_len = 16;
+       id->ppaf.pg_offset = 16;
+       id->ppaf.pg_len = 16;
+       id->ppaf.sect_offset = 32;
+       id->ppaf.sect_len = 8;
+       id->ppaf.pln_offset = 40;
+       id->ppaf.pln_len = 8;
+       id->ppaf.lun_offset = 48;
+       id->ppaf.lun_len = 8;
+       id->ppaf.ch_offset = 56;
+       id->ppaf.ch_len = 8;
+
+       do_div(size, bs); /* convert size to pages */
+       do_div(size, 256); /* convert size to blocks (256 pages per block) */
+       grp = &id->groups[0];
+       grp->mtype = 0;
+       grp->fmtype = 0;
+       grp->num_ch = 1;
+       grp->num_pg = 256;
+       blksize = size;
+       do_div(size, (1 << 16));
+       grp->num_lun = size + 1;
+       do_div(blksize, grp->num_lun);
+       grp->num_blk = blksize;
+       grp->num_pln = 1;
+
+       grp->fpg_sz = bs;
+       grp->csecs = bs;
+       grp->trdt = 25000;
+       grp->trdm = 25000;
+       grp->tprt = 500000;
+       grp->tprm = 500000;
+       grp->tbet = 1500000;
+       grp->tbem = 1500000;
+       grp->mpos = 0x010101; /* single plane rwe */
+       grp->cpar = hw_queue_depth;
+
+       return 0;
+}
+
+static void *null_lnvm_create_dma_pool(struct nvm_dev *dev, char *name)
+{
+       mempool_t *virtmem_pool;
+
+       virtmem_pool = mempool_create_slab_pool(64, ppa_cache);
+       if (!virtmem_pool) {
+               pr_err("null_blk: Unable to create virtual memory pool\n");
+               return NULL;
+       }
+
+       return virtmem_pool;
+}
+
+static void null_lnvm_destroy_dma_pool(void *pool)
+{
+       mempool_destroy(pool);
+}
+
+static void *null_lnvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
+                               gfp_t mem_flags, dma_addr_t *dma_handler)
+{
+       return mempool_alloc(pool, mem_flags);
+}
+
+static void null_lnvm_dev_dma_free(void *pool, void *entry,
+                                                       dma_addr_t dma_handler)
+{
+       mempool_free(entry, pool);
+}
+
+static struct nvm_dev_ops null_lnvm_dev_ops = {
+       .identity               = null_lnvm_id,
+       .submit_io              = null_lnvm_submit_io,
+
+       .create_dma_pool        = null_lnvm_create_dma_pool,
+       .destroy_dma_pool       = null_lnvm_destroy_dma_pool,
+       .dev_dma_alloc          = null_lnvm_dev_dma_alloc,
+       .dev_dma_free           = null_lnvm_dev_dma_free,
+
+       /* Simulate nvme protocol restriction */
+       .max_phys_sect          = 64,
+};
+#else
+static struct nvm_dev_ops null_lnvm_dev_ops;
+#endif /* CONFIG_NVM */
+
 static int null_open(struct block_device *bdev, fmode_t mode)
 {
        return 0;
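
The identity geometry above is derived from the gb and bs module
parameters. Worked through with what should be the defaults (gb=250, and
bs forced to 4096 by the use_lightnvm check added to null_init() below):

    250 GiB / 4096 B per page        = 65,536,000 flash pages
    65,536,000 / 256 pages per block =    256,000 blocks in total
    256,000 / 65,536, plus 1         =          4 LUNs
    256,000 blocks / 4 LUNs          =     64,000 blocks per LUN

so num_lun * num_blk * num_pg = 4 * 64,000 * 256 recovers exactly the
65,536,000 pages of the advertised capacity.
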
@@ -447,22 +610,6 @@ static int setup_commands(struct nullb_queue *nq)
        return 0;
 }
 
-static void cleanup_queue(struct nullb_queue *nq)
-{
-       kfree(nq->tag_map);
-       kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
-       int i;
-
-       for (i = 0; i < nullb->nr_queues; i++)
-               cleanup_queue(&nullb->queues[i]);
-
-       kfree(nullb->queues);
-}
-
 static int setup_queues(struct nullb *nullb)
 {
        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -561,11 +708,6 @@ static int null_add_dev(void)
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
-       disk = nullb->disk = alloc_disk_node(1, home_node);
-       if (!disk) {
-               rv = -ENOMEM;
-               goto out_cleanup_blk_queue;
-       }
 
        mutex_lock(&lock);
        list_add_tail(&nullb->list, &nullb_list);
@@ -575,9 +717,23 @@ static int null_add_dev(void)
        blk_queue_logical_block_size(nullb->q, bs);
        blk_queue_physical_block_size(nullb->q, bs);
 
+       sprintf(nullb->disk_name, "nullb%d", nullb->index);
+
+       if (use_lightnvm) {
+               rv = nvm_register(nullb->q, nullb->disk_name,
+                                                       &null_lnvm_dev_ops);
+               if (rv)
+                       goto out_cleanup_blk_queue;
+               goto done;
+       }
+
+       disk = nullb->disk = alloc_disk_node(1, home_node);
+       if (!disk) {
+               rv = -ENOMEM;
+               goto out_cleanup_lightnvm;
+       }
        size = gb * 1024 * 1024 * 1024ULL;
-       sector_div(size, bs);
-       set_capacity(disk, size);
+       set_capacity(disk, size >> 9);
 
        disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
        disk->major             = null_major;
@@ -585,10 +741,15 @@ static int null_add_dev(void)
        disk->fops              = &null_fops;
        disk->private_data      = nullb;
        disk->queue             = nullb->q;
-       sprintf(disk->disk_name, "nullb%d", nullb->index);
+       strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+
        add_disk(disk);
+done:
        return 0;
 
+out_cleanup_lightnvm:
+       if (use_lightnvm)
+               nvm_unregister(nullb->disk_name);
 out_cleanup_blk_queue:
        blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
@@ -604,7 +765,9 @@ out:
 
 static int __init null_init(void)
 {
+       int ret = 0;
        unsigned int i;
+       struct nullb *nullb;
 
        if (bs > PAGE_SIZE) {
                pr_warn("null_blk: invalid block size\n");
@@ -612,6 +775,18 @@ static int __init null_init(void)
                bs = PAGE_SIZE;
        }
 
+       if (use_lightnvm && bs != 4096) {
+               pr_warn("null_blk: LightNVM only supports 4k block size\n");
+               pr_warn("null_blk: defaults block size to 4k\n");
+               bs = 4096;
+       }
+
+       if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+               pr_warn("null_blk: LightNVM only supported for blk-mq\n");
+               pr_warn("null_blk: defaults queue mode to blk-mq\n");
+               queue_mode = NULL_Q_MQ;
+       }
+
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes) {
                        pr_warn("null_blk: submit_queues param is set to %u.",
@@ -625,32 +800,38 @@ static int __init null_init(void)
 
        mutex_init(&lock);
 
-       /* Initialize a separate list for each CPU for issuing softirqs */
-       for_each_possible_cpu(i) {
-               struct completion_queue *cq = &per_cpu(completion_queues, i);
-
-               init_llist_head(&cq->list);
-
-               if (irqmode != NULL_IRQ_TIMER)
-                       continue;
-
-               hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               cq->timer.function = null_cmd_timer_expired;
-       }
-
        null_major = register_blkdev(0, "nullb");
        if (null_major < 0)
                return null_major;
 
-       for (i = 0; i < nr_devices; i++) {
-               if (null_add_dev()) {
-                       unregister_blkdev(null_major, "nullb");
-                       return -EINVAL;
+       if (use_lightnvm) {
+               ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
+                                                               0, 0, NULL);
+               if (!ppa_cache) {
+                       pr_err("null_blk: unable to create ppa cache\n");
+                       ret = -ENOMEM;
+                       goto err_ppa;
                }
        }
 
+       for (i = 0; i < nr_devices; i++) {
+               ret = null_add_dev();
+               if (ret)
+                       goto err_dev;
+       }
+
        pr_info("null: module loaded\n");
        return 0;
+
+err_dev:
+       while (!list_empty(&nullb_list)) {
+               nullb = list_entry(nullb_list.next, struct nullb, list);
+               null_del_dev(nullb);
+       }
+       kmem_cache_destroy(ppa_cache);
+err_ppa:
+       unregister_blkdev(null_major, "nullb");
+       return ret;
 }
 
 static void __exit null_exit(void)
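
One detail worth noting in the unwind above: err_dev destroys the ppa
cache and then falls through to err_ppa, and kmem_cache_destroy() treats
a NULL pointer as a no-op, so the shared error path is correct whether or
not use_lightnvm ever created the cache.
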
@@ -665,6 +846,8 @@ static void __exit null_exit(void)
                null_del_dev(nullb);
        }
        mutex_unlock(&lock);
+
+       kmem_cache_destroy(ppa_cache);
 }
 
 module_init(null_init);
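
With the patch applied, the new code paths are exercised by loading the
module with the parameters introduced here, e.g.:

    modprobe null_blk use_lightnvm=1 queue_mode=2 irqmode=2

queue_mode=2 selects blk-mq (null_init() forces this anyway when
use_lightnvm is set) and irqmode=2 arms the per-command hrtimers; the
device then registers through nvm_register() as nullb0 instead of
appearing as a plain gendisk.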