Upgrade to 4.4.50-rt62
[kvmfornfv.git] kernel/drivers/target/target_core_transport.c
index 94f4ffa..aa517c4 100644
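
In summary: the per-command UNIT ATTENTION, ALUA state, and reservation
checks move out of target_setup_cmd_from_cdb() into
__target_execute_cmd(), which gains a do_checks parameter so the gate
runs only after SAM task-attribute handling, and
transport_generic_request_failure() plus the sense table learn
TCM_COPY_TARGET_DEVICE_NOT_REACHABLE. A new SCF_TASK_ATTR_SET flag keeps
transport_complete_task_attr() from adjusting ordering counters for
commands that never took the task-attr path, target_get_sess_cmd()
records SCF_ACK_KREF, target_release_cmd_kref() now treats fabric-stop
as meaningful only for aborted commands, and
transport_generic_handle_tmr() completes an already-aborted TMR back to
the fabric instead of queueing target_tmr_work().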
@@ -1270,23 +1270,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
 
        trace_target_sequencer_start(cmd);
 
-       /*
-        * Check for an existing UNIT ATTENTION condition
-        */
-       ret = target_scsi3_ua_check(cmd);
-       if (ret)
-               return ret;
-
-       ret = target_alua_state_check(cmd);
-       if (ret)
-               return ret;
-
-       ret = target_check_reservation(cmd);
-       if (ret) {
-               cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-               return ret;
-       }
-
        ret = dev->transport->parse_cdb(cmd);
        if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
                pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
@@ -1697,6 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+       case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
                break;
        case TCM_OUT_OF_RESOURCES:
                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1749,20 +1733,45 @@ queue_full:
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
-void __target_execute_cmd(struct se_cmd *cmd)
+void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
 {
        sense_reason_t ret;
 
-       if (cmd->execute_cmd) {
-               ret = cmd->execute_cmd(cmd);
-               if (ret) {
-                       spin_lock_irq(&cmd->t_state_lock);
-                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
-                       spin_unlock_irq(&cmd->t_state_lock);
+       if (!cmd->execute_cmd) {
+               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               goto err;
+       }
+       if (do_checks) {
+               /*
+                * Check for an existing UNIT ATTENTION condition after
+                * target_handle_task_attr() has done SAM task attr
+                * checking, and may have already deferred execution
+                * out to target_restart_delayed_cmds() context.
+                */
+               ret = target_scsi3_ua_check(cmd);
+               if (ret)
+                       goto err;
+
+               ret = target_alua_state_check(cmd);
+               if (ret)
+                       goto err;
 
-                       transport_generic_request_failure(cmd, ret);
+               ret = target_check_reservation(cmd);
+               if (ret) {
+                       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+                       goto err;
                }
        }
+
+       ret = cmd->execute_cmd(cmd);
+       if (!ret)
+               return;
+err:
+       spin_lock_irq(&cmd->t_state_lock);
+       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+       spin_unlock_irq(&cmd->t_state_lock);
+
+       transport_generic_request_failure(cmd, ret);
 }
 
 static int target_write_prot_action(struct se_cmd *cmd)
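
Why the new do_checks flag: __target_execute_cmd() is now the single
place the UNIT ATTENTION / ALUA / reservation gate runs, and a command
must not pass through that gate twice. Both call sites in this file
pass true; false is for a caller re-driving execution of a command
whose gate already ran, e.g. the second phase of a multi-phase command
such as COMPARE_AND_WRITE. A minimal sketch of that contract, where
requeue_second_phase() and my_second_phase() are hypothetical names and
not part of this patch:

	/* Sketch only: re-drive execution for a command whose checks
	 * already ran when its first phase was dispatched.
	 */
	static sense_reason_t my_second_phase(struct se_cmd *cmd);

	static void requeue_second_phase(struct se_cmd *cmd)
	{
		cmd->execute_cmd = my_second_phase;
		/*
		 * do_checks=false: re-running target_scsi3_ua_check()
		 * et al. here could fail a command that is already
		 * mid-execution.
		 */
		__target_execute_cmd(cmd, false);
	}
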
@@ -1807,6 +1816,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return false;
 
+       cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
+
        /*
         * Check for the existence of HEAD_OF_QUEUE, and if true return 1
         * to allow the passed struct se_cmd list of tasks to the front of the list.
@@ -1887,7 +1898,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                return;
        }
 
-       __target_execute_cmd(cmd);
+       __target_execute_cmd(cmd, true);
 }
 EXPORT_SYMBOL(target_execute_cmd);
 
@@ -1911,7 +1922,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
                list_del(&cmd->se_delayed_node);
                spin_unlock(&dev->delayed_cmd_lock);
 
-               __target_execute_cmd(cmd);
+               __target_execute_cmd(cmd, true);
 
                if (cmd->sam_task_attr == TCM_ORDERED_TAG)
                        break;
@@ -1929,6 +1940,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return;
 
+       if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
+               goto restart;
+
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
                atomic_dec_mb(&dev->simple_cmds);
                dev->dev_cur_ordered_id++;
@@ -1945,7 +1959,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
                         dev->dev_cur_ordered_id);
        }
-
+restart:
        target_restart_delayed_cmds(dev);
 }
 
@@ -2496,8 +2510,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
         * fabric acknowledgement that requires two target_put_sess_cmd()
         * invocations before se_cmd descriptor release.
         */
-       if (ack_kref)
+       if (ack_kref) {
                kref_get(&se_cmd->cmd_kref);
+               se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+       }
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (se_sess->sess_tearing_down) {
@@ -2533,15 +2549,10 @@ static void target_release_cmd_kref(struct kref *kref)
        bool fabric_stop;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
-       if (list_empty(&se_cmd->se_cmd_list)) {
-               spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-               target_free_cmd_mem(se_cmd);
-               se_cmd->se_tfo->release_cmd(se_cmd);
-               return;
-       }
 
        spin_lock(&se_cmd->t_state_lock);
-       fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+       fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
+                     (se_cmd->transport_state & CMD_T_ABORTED);
        spin_unlock(&se_cmd->t_state_lock);
 
        if (se_cmd->cmd_wait_set || fabric_stop) {
@@ -2618,8 +2629,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 
        list_for_each_entry_safe(se_cmd, tmp_cmd,
                                &se_sess->sess_wait_list, se_cmd_list) {
-               list_del_init(&se_cmd->se_cmd_list);
-
                pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
                        " %d\n", se_cmd, se_cmd->t_state,
                        se_cmd->se_tfo->get_cmd_state(se_cmd));
@@ -2827,6 +2836,11 @@ static const struct sense_info sense_info_table[] = {
                .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
                .add_sector_info = true,
        },
+       [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+               .key = COPY_ABORTED,
+               .asc = 0x0d,
+               .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+       },
        [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
                /*
                 * Returning ILLEGAL REQUEST would cause immediate IO errors on
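
For reference, the key/asc/ascq triple in a sense_info_table entry maps
directly onto fixed-format sense data as defined by SPC: byte 0 carries
the response code, byte 2 the sense key, byte 7 the additional length,
and bytes 12/13 the ASC/ASCQ. A minimal sketch of that encoding, where
fill_fixed_sense() is a hypothetical helper, not the kernel's
translation routine:

	/* Build 18 bytes of fixed-format sense data; for the new entry
	 * above: COPY_ABORTED (0x0a) / ASC 0x0d / ASCQ 0x02.
	 */
	static void fill_fixed_sense(unsigned char *buf, unsigned char key,
				     unsigned char asc, unsigned char ascq)
	{
		memset(buf, 0, 18);
		buf[0] = 0x70;	/* current error, fixed format */
		buf[2] = key;	/* sense key */
		buf[7] = 0x0a;	/* additional sense length: 18 - 8 */
		buf[12] = asc;	/* additional sense code */
		buf[13] = ascq;	/* additional sense code qualifier */
	}
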
@@ -3043,7 +3058,6 @@ static void target_tmr_work(struct work_struct *work)
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
-       cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        cmd->se_tfo->queue_tm_rsp(cmd);
@@ -3056,11 +3070,25 @@ int transport_generic_handle_tmr(
        struct se_cmd *cmd)
 {
        unsigned long flags;
+       bool aborted = false;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       cmd->transport_state |= CMD_T_ACTIVE;
+       if (cmd->transport_state & CMD_T_ABORTED) {
+               aborted = true;
+       } else {
+               cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+               cmd->transport_state |= CMD_T_ACTIVE;
+       }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
+       if (aborted) {
+               pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
+                       "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
+                       cmd->se_tmr_req->ref_task_tag, cmd->tag);
+               transport_cmd_check_stop_to_fabric(cmd);
+               return 0;
+       }
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
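
The CMD_T_ABORTED check closes a window in which a TMR is itself marked
aborted (for instance during connection shutdown) after the fabric hands
it to the core but before target_tmr_work() runs; such a TMR is now
acknowledged via transport_cmd_check_stop_to_fabric() instead of being
queued. Nothing changes on the fabric side, since both paths return 0.
A hypothetical receive path, where my_fabric_recv_tmr() is not an
in-tree function:

	static void my_fabric_recv_tmr(struct se_cmd *se_cmd)
	{
		/* Queues target_tmr_work(), or completes the TMR on the
		 * spot if CMD_T_ABORTED was already set; either way the
		 * fabric waits for its normal callbacks.
		 */
		transport_generic_handle_tmr(se_cmd);
	}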