These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/drivers/target/target_core_iblock.c b/kernel/drivers/target/target_core_iblock.c
index 8c96568..2c53dce 100644
 #include <linux/genhd.h>
 #include <linux/file.h>
 #include <linux/module.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
+#include <scsi/scsi_proto.h>
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
-#include <target/target_core_backend_configfs.h>
 
 #include "target_core_iblock.h"
 
@@ -54,17 +52,11 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 }
 
 
-static struct se_subsystem_api iblock_template;
-
-/*     iblock_attach_hba(): (Part of se_subsystem_api_t template)
- *
- *
- */
 static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
 {
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
-               IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
+               IBLOCK_VERSION, TARGET_CORE_VERSION);
        return 0;
 }
 
@@ -113,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev)
        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;
+       else
+               dev->dev_flags |= DF_READ_ONLY;
 
        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
@@ -127,27 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-       /*
-        * Check if the underlying struct block_device request_queue supports
-        * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-        * in ATA and we need to set TPE=1
-        */
-       if (blk_queue_discard(q)) {
-               dev->dev_attrib.max_unmap_lba_count =
-                               q->limits.max_discard_sectors;
-
-               /*
-                * Currently hardcoded to 1 in Linux/SCSI code..
-                */
-               dev->dev_attrib.max_unmap_block_desc_count = 1;
-               dev->dev_attrib.unmap_granularity =
-                               q->limits.discard_granularity >> 9;
-               dev->dev_attrib.unmap_granularity_alignment =
-                               q->limits.discard_alignment;
-
+       if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+                                             dev->dev_attrib.hw_block_size))
                pr_debug("IBLOCK: BLOCK Discard support available,"
-                               " disabled by default\n");
-       }
+                        " disabled by default\n");
+
        /*
         * Enable write same emulation for IBLOCK and use 0xFFFF as
         * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -161,17 +139,17 @@ static int iblock_configure_device(struct se_device *dev)
        if (bi) {
                struct bio_set *bs = ib_dev->ibd_bio_set;
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
-                   !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
+                   !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
-                              " supported\n", bi->name);
+                              " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
-               } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+               } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }
 
@@ -198,6 +176,14 @@ out:
        return ret;
 }
 
+static void iblock_dev_call_rcu(struct rcu_head *p)
+{
+       struct se_device *dev = container_of(p, struct se_device, rcu_head);
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+       kfree(ib_dev);
+}
+
 static void iblock_free_device(struct se_device *dev)
 {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -207,7 +193,7 @@ static void iblock_free_device(struct se_device *dev)
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
 
-       kfree(ib_dev);
+       call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
 }
 
 static unsigned long long iblock_emulate_read_cap_with_block_size(
@@ -306,20 +292,13 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
        kfree(ibr);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
+static void iblock_bio_done(struct bio *bio)
 {
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;
 
-       /*
-        * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-        */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-               err = -EIO;
-
-       if (err != 0) {
-               pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-                       " err: %d\n", bio, err);
+       if (bio->bi_error) {
+               pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
@@ -370,15 +349,15 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
        blk_finish_plug(&plug);
 }
 
-static void iblock_end_io_flush(struct bio *bio, int err)
+static void iblock_end_io_flush(struct bio *bio)
 {
        struct se_cmd *cmd = bio->bi_private;
 
-       if (err)
-               pr_err("IBLOCK: cache flush failed: %d\n", err);
+       if (bio->bi_error)
+               pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
 
        if (cmd) {
-               if (err)
+               if (bio->bi_error)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -415,13 +394,16 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 }
 
 static sense_reason_t
-iblock_do_unmap(struct se_cmd *cmd, void *priv,
-               sector_t lba, sector_t nolb)
+iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
-       struct block_device *bdev = priv;
+       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+       struct se_device *dev = cmd->se_dev;
        int ret;
 
-       ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+       ret = blkdev_issue_discard(bdev,
+                                  target_to_linux_sector(dev, lba),
+                                  target_to_linux_sector(dev,  nolb),
+                                  GFP_KERNEL, 0);
        if (ret < 0) {
                pr_err("blkdev_issue_discard() failed: %d\n", ret);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -430,30 +412,6 @@ iblock_do_unmap(struct se_cmd *cmd, void *priv,
        return 0;
 }
 
-static sense_reason_t
-iblock_execute_unmap(struct se_cmd *cmd)
-{
-       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
-
-       return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
-}
-
-static sense_reason_t
-iblock_execute_write_same_unmap(struct se_cmd *cmd)
-{
-       struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
-       sector_t lba = cmd->t_task_lba;
-       sector_t nolb = sbc_get_write_same_sectors(cmd);
-       sense_reason_t ret;
-
-       ret = iblock_do_unmap(cmd, bdev, lba, nolb);
-       if (ret)
-               return ret;
-
-       target_complete_cmd(cmd, GOOD);
-       return 0;
-}
-
 static sense_reason_t
 iblock_execute_write_same(struct se_cmd *cmd)
 {
@@ -461,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
        struct scatterlist *sg;
        struct bio *bio;
        struct bio_list list;
-       sector_t block_lba = cmd->t_task_lba;
-       sector_t sectors = sbc_get_write_same_sectors(cmd);
+       struct se_device *dev = cmd->se_dev;
+       sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+       sector_t sectors = target_to_linux_sector(dev,
+                                       sbc_get_write_same_sectors(cmd));
 
        if (cmd->prot_op) {
                pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -676,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
 {
        struct se_device *dev = cmd->se_dev;
+       sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
        struct iblock_req *ibr;
        struct bio *bio, *bio_start;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
-       sector_t block_lba;
        unsigned bio_cnt;
        int rw = 0;
        int i;
@@ -707,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                rw = READ;
        }
 
-       /*
-        * Convert the blocksize advertised to the initiator to the 512 byte
-        * units unconditionally used by the Linux block layer.
-        */
-       if (dev->dev_attrib.block_size == 4096)
-               block_lba = (cmd->t_task_lba << 3);
-       else if (dev->dev_attrib.block_size == 2048)
-               block_lba = (cmd->t_task_lba << 2);
-       else if (dev->dev_attrib.block_size == 1024)
-               block_lba = (cmd->t_task_lba << 1);
-       else if (dev->dev_attrib.block_size == 512)
-               block_lba = cmd->t_task_lba;
-       else {
-               pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-                               " %u\n", dev->dev_attrib.block_size);
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       }
-
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
@@ -845,7 +787,6 @@ static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
-       .execute_write_same_unmap = iblock_execute_write_same_unmap,
        .execute_unmap          = iblock_execute_unmap,
 };
 
@@ -864,42 +805,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
        return q->flush_flags & REQ_FLUSH;
 }
 
-DEF_TB_DEFAULT_ATTRIBS(iblock);
-
-static struct configfs_attribute *iblock_backend_dev_attrs[] = {
-       &iblock_dev_attrib_emulate_model_alias.attr,
-       &iblock_dev_attrib_emulate_dpo.attr,
-       &iblock_dev_attrib_emulate_fua_write.attr,
-       &iblock_dev_attrib_emulate_fua_read.attr,
-       &iblock_dev_attrib_emulate_write_cache.attr,
-       &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &iblock_dev_attrib_emulate_tas.attr,
-       &iblock_dev_attrib_emulate_tpu.attr,
-       &iblock_dev_attrib_emulate_tpws.attr,
-       &iblock_dev_attrib_emulate_caw.attr,
-       &iblock_dev_attrib_emulate_3pc.attr,
-       &iblock_dev_attrib_pi_prot_type.attr,
-       &iblock_dev_attrib_hw_pi_prot_type.attr,
-       &iblock_dev_attrib_pi_prot_format.attr,
-       &iblock_dev_attrib_enforce_pr_isids.attr,
-       &iblock_dev_attrib_is_nonrot.attr,
-       &iblock_dev_attrib_emulate_rest_reord.attr,
-       &iblock_dev_attrib_force_pr_aptpl.attr,
-       &iblock_dev_attrib_hw_block_size.attr,
-       &iblock_dev_attrib_block_size.attr,
-       &iblock_dev_attrib_hw_max_sectors.attr,
-       &iblock_dev_attrib_optimal_sectors.attr,
-       &iblock_dev_attrib_hw_queue_depth.attr,
-       &iblock_dev_attrib_queue_depth.attr,
-       &iblock_dev_attrib_max_unmap_lba_count.attr,
-       &iblock_dev_attrib_max_unmap_block_desc_count.attr,
-       &iblock_dev_attrib_unmap_granularity.attr,
-       &iblock_dev_attrib_unmap_granularity_alignment.attr,
-       &iblock_dev_attrib_max_write_same_len.attr,
-       NULL,
-};
-
-static struct se_subsystem_api iblock_template = {
+static const struct target_backend_ops iblock_ops = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
@@ -919,21 +825,17 @@ static struct se_subsystem_api iblock_template = {
        .get_io_min             = iblock_get_io_min,
        .get_io_opt             = iblock_get_io_opt,
        .get_write_cache        = iblock_get_write_cache,
+       .tb_dev_attrib_attrs    = sbc_attrib_attrs,
 };
 
 static int __init iblock_module_init(void)
 {
-       struct target_backend_cits *tbc = &iblock_template.tb_cits;
-
-       target_core_setup_sub_cits(&iblock_template);
-       tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
-
-       return transport_subsystem_register(&iblock_template);
+       return transport_backend_register(&iblock_ops);
 }
 
 static void __exit iblock_module_exit(void)
 {
-       transport_subsystem_release(&iblock_template);
+       target_backend_unregister(&iblock_ops);
 }
 
 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
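
Note on the LBA rescaling above: the open-coded blocksize ladder removed from iblock_execute_rw(), and the new calls in the discard and WRITE_SAME paths, both rely on target_to_linux_sector() to convert the LBA advertised to the initiator into the 512-byte units the Linux block layer uses unconditionally. As a rough illustration only — a user-space sketch with hypothetical helper names (to_linux_sector, ilog2_u32), not the kernel's actual definition — the conversion amounts to a shift by log2(block_size) - 9, assuming the backend block size is a power of two of at least 512 bytes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's sector_t. */
typedef uint64_t sector_t;

/* Integer log2 for a nonzero 32-bit value (stand-in for the kernel's ilog2()). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int log = 0;

	while (v >>= 1)
		log++;
	return log;
}

/*
 * Rescale a target-visible LBA (in block_size units) to 512-byte Linux
 * block-layer sectors, mirroring the ladder removed from iblock_execute_rw():
 * 4096 -> <<3, 2048 -> <<2, 1024 -> <<1, 512 -> unchanged.
 */
static sector_t to_linux_sector(sector_t lba, uint32_t block_size)
{
	assert(block_size >= 512 && (block_size & (block_size - 1)) == 0);
	return lba << (ilog2_u32(block_size) - 9);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)to_linux_sector(100, 4096)); /* 800 */
	printf("%llu\n", (unsigned long long)to_linux_sector(100, 512));  /* 100 */
	return 0;
}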