/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev);
static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
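
/*
 * Illustrative sketch (not part of this file): callers typically grab a
 * fresh index when instantiating a statistics-visible object, e.g.:
 *
 *	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
 *
 * where SCSI_INST_INDEX is one of the scsi_index_t values declared in
 * target_core_base.h.
 */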

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	INIT_LIST_HEAD(&se_sess->sess_wait_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	kref_init(&se_sess->sess_kref);
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
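
/*
 * Illustrative sketch only (hypothetical fabric driver):
 * transport_init_session() is normally paired with
 * transport_register_session() once the fabric I_T nexus is known:
 *
 *	se_sess = transport_init_session(TARGET_PROT_NORMAL);
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, my_conn);
 *
 * where my_conn stands in for the fabric's private per-connection pointer.
 */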

int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kzalloc(tag_num * tag_size,
					GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!se_sess->sess_cmd_map) {
		se_sess->sess_cmd_map = vzalloc(tag_num * tag_size);
		if (!se_sess->sess_cmd_map) {
			pr_err("Unable to allocate se_sess->sess_cmd_map\n");
			return -ENOMEM;
		}
	}

	rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);

struct se_session *transport_init_session_tags(unsigned int tag_num,
					       unsigned int tag_size,
					       enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	se_sess = transport_init_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session_tags);
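
/*
 * Sketch of the tag pool pattern enabled above, assuming a hypothetical
 * struct my_cmd that embeds struct se_cmd: fabrics using pre-allocated
 * per-session descriptors typically do
 *
 *	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
 *	cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
 *
 * and return the tag via percpu_ida_free() once the command completes.
 */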

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * e.g.: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		kref_get(&se_nacl->acl_kref);

		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

static void target_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	se_tpg->se_tpg_tfo->close_session(se_sess);
}

void target_get_session(struct se_session *se_sess)
{
	kref_get(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);

void target_put_session(struct se_session *se_sess)
{
	kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);

	complete(&nacl->acl_free_comp);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (se_nacl->acl_stop == 0)
			list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	if (se_sess->sess_cmd_map) {
		percpu_ida_destroy(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	const struct target_core_fabric_ops *se_tfo;
	struct se_node_acl *se_nacl;
	unsigned long flags;
	bool comp_nacl = true, drop_nacl = false;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}
	se_tfo = se_tpg->se_tpg_tfo;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;

	mutex_lock(&se_tpg->acl_node_mutex);
	if (se_nacl && se_nacl->dynamic_node_acl) {
		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			list_del(&se_nacl->acl_list);
			se_tpg->num_node_acls--;
			drop_nacl = true;
		}
	}
	mutex_unlock(&se_tpg->acl_node_mutex);

	if (drop_nacl) {
		core_tpg_wait_for_nacl_pr_ref(se_nacl);
		core_free_device_list_for_node(se_nacl, se_tpg);
		kfree(se_nacl);
		comp_nacl = false;
	}
	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->get_fabric_name());
	/*
	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
	 * removal context.
	 */
	if (se_nacl && comp_nacl)
		target_put_nacl(se_nacl);

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	if (cmd->transport_state & CMD_T_BUSY)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
				    bool write_pending)
{
	unsigned long flags;

	if (remove_from_lists) {
		target_remove_from_state_list(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the handoff to FE.
		 */
		cmd->se_lun = NULL;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (write_pending)
		cmd->t_state = TRANSPORT_WRITE_PENDING;

	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}

	cmd->transport_state &= ~CMD_T_ACTIVE;
	if (remove_from_lists) {
		/*
		 * Some fabric modules like tcm_loop can release
		 * their internally allocated I/O reference now and
		 * struct se_cmd now.
		 *
		 * Fabric modules are expected to return '1' here if the
		 * se_cmd being passed is released at this point,
		 * or zero if not being released.
		 */
		if (cmd->se_tfo->check_stop_free != NULL) {
			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
			return cmd->se_tfo->check_stop_free(cmd);
		}
	}

	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return 0;
}

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, true, false);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
		transport_lun_remove_cmd(cmd);
	/*
	 * Allow the fabric driver to unmap any resources before
	 * releasing the descriptor via TFO->release_cmd()
	 */
	if (remove)
		cmd->se_tfo->aborted_task(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove && ack_kref)
		transport_put_cmd(cmd);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	struct se_device *dev = cmd->se_dev;
	int success = scsi_status == GOOD;
	unsigned long flags;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->transport_state &= ~CMD_T_BUSY;

	if (dev && dev->transport->transport_complete) {
		dev->transport->transport_complete(cmd,
				cmd->t_data_sg,
				transport_get_sense_buffer(cmd));
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
	}

	/*
	 * See if we are waiting to complete for an exception condition.
	 */
	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete(&cmd->task_stop_comp);
		return;
	}

	/*
	 * Check for case where an explicit ABORT_TASK has been received
	 * and transport_wait_for_tasks() will be waiting for completion..
	 */
	if (cmd->transport_state & CMD_T_ABORTED ||
	    cmd->transport_state & CMD_T_STOP) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	} else if (!success) {
		INIT_WORK(&cmd->work, target_complete_failure_work);
	} else {
		INIT_WORK(&cmd->work, target_complete_ok_work);
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
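
/*
 * Worked example (illustrative): a backend that generated a 36-byte
 * INQUIRY response for a command with cmd->data_length == 256 would call
 * target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36), which sets
 * SCF_UNDERFLOW_BIT, reports residual_count = 220 and shrinks
 * cmd->data_length to 36 before completing with GOOD status.
 */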

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
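
/*
 * Worked example (illustrative): for a binary (code set 0x01) NAA
 * designator whose first payload bytes are 0x60 0x01 ..., the loop above
 * first emits hex_str[3] ('3', the NAA designator type) and then the
 * nibbles of each byte, so device_identifier begins "36001...".
 */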

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
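	/*
	 * Worked example (illustrative, assumes PAGE_SIZE == 4096): a fabric
	 * reporting max_data_sg_nents = 32 yields mtl = 32 * 4096 = 131072
	 * bytes, so a 1 MiB READ is shrunk to 128 KiB here with
	 * residual_count = 917504.
	 */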
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE &&
		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			pr_err("Rejecting underflow/overflow WRITE data\n");
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512) {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
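
/*
 * Worked example (illustrative): if the CDB encodes a 4096-byte transfer
 * while the fabric expected only 2048 bytes (size > data_length), the
 * command is marked SCF_OVERFLOW_BIT with residual_count = 2048 and
 * data_length stays at 2048; in the opposite case SCF_UNDERFLOW_BIT is
 * set and data_length is reduced to the smaller CDB transfer length.
 */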

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	init_completion(&cmd->cmd_wait_comp);
	init_completion(&cmd->task_stop_comp);
	spin_lock_init(&cmd->t_state_lock);
	kref_init(&cmd->cmd_kref);
	cmd->transport_state = CMD_T_DEV_ACTIVE;

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);
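
/*
 * Illustrative sketch (hypothetical fabric types): the "local struct
 * se_cmd" convention noted above means the fabric embeds the descriptor,
 * e.g.:
 *
 *	struct my_fabric_cmd {
 *		struct my_fabric_state	state;
 *		struct se_cmd		se_cmd;
 *	};
 *
 * so fabric callbacks can recover it via
 * container_of(se_cmd, struct my_fabric_cmd, se_cmd).
 */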

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			return TCM_OUT_OF_RESOURCES;
		}
	} else
		cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	ret = target_scsi3_ua_check(cmd);
	if (ret)
		return ret;

	ret = target_alua_state_check(cmd);
	if (ret)
		return ret;

	ret = target_check_reservation(cmd);
	if (ret) {
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		return ret;
	}

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->get_fabric_name(),
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_setup_cmd_from_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be called from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}
	if (in_interrupt()) {
		dump_stack();
		pr_err("transport_generic_handle_cdb cannot be called"
				" from interrupt context\n");
		return -EINVAL;
	}
	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/*
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *                       se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
1390  * @data_dir: DMA data direction
1391  * @flags: flags for command submission from target_sc_flags_tables
1392  * @sgl: struct scatterlist memory for unidirectional mapping
1393  * @sgl_count: scatterlist count for unidirectional mapping
1394  * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1395  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1396  * @sgl_prot: struct scatterlist memory protection information
1397  * @sgl_prot_count: scatterlist count for protection information
1398  *
1399  * Task tags are supported if the caller has set @se_cmd->tag.
1400  *
1401  * Returns non zero to signal active I/O shutdown failure.  All other
1402  * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
1403  * but still return zero here.
1404  *
1405  * This may only be called from process context, and also currently
1406  * assumes internal allocation of fabric payload buffer by target-core.
1407  */
1408 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1409                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1410                 u32 data_length, int task_attr, int data_dir, int flags,
1411                 struct scatterlist *sgl, u32 sgl_count,
1412                 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1413                 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1414 {
1415         struct se_portal_group *se_tpg;
1416         sense_reason_t rc;
1417         int ret;
1418
1419         se_tpg = se_sess->se_tpg;
1420         BUG_ON(!se_tpg);
1421         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1422         BUG_ON(in_interrupt());
1423         /*
1424          * Initialize se_cmd for target operation.  From this point
1425          * exceptions are handled by sending exception status via
1426          * target_core_fabric_ops->queue_status() callback
1427          */
1428         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1429                                 data_length, data_dir, task_attr, sense);
1430         if (flags & TARGET_SCF_UNKNOWN_SIZE)
1431                 se_cmd->unknown_data_length = 1;
1432         /*
1433          * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1434          * se_sess->sess_cmd_list.  A second kref_get here is necessary
1435          * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1436          * kref_put() to happen during fabric packet acknowledgement.
1437          */
1438         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1439         if (ret)
1440                 return ret;
1441         /*
1442          * Signal bidirectional data payloads to target-core
1443          */
1444         if (flags & TARGET_SCF_BIDI_OP)
1445                 se_cmd->se_cmd_flags |= SCF_BIDI;
1446         /*
1447          * Locate se_lun pointer and attach it to struct se_cmd
1448          */
1449         rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1450         if (rc) {
1451                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1452                 target_put_sess_cmd(se_cmd);
1453                 return 0;
1454         }
1455
1456         rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1457         if (rc != 0) {
1458                 transport_generic_request_failure(se_cmd, rc);
1459                 return 0;
1460         }
1461
1462         /*
1463          * Save pointers for SGLs containing protection information,
1464          * if present.
1465          */
1466         if (sgl_prot_count) {
1467                 se_cmd->t_prot_sg = sgl_prot;
1468                 se_cmd->t_prot_nents = sgl_prot_count;
1469                 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1470         }
1471
1472         /*
1473  * When a non-zero sgl_count has been passed, perform SGL passthrough
1474  * mapping for pre-allocated fabric memory instead of having target
1475  * core perform an internal SGL allocation.
1476          */
1477         if (sgl_count != 0) {
1478                 BUG_ON(!sgl);
1479
1480                 /*
1481                  * A work-around for tcm_loop as some userspace code via
1482                  * scsi-generic does not memset its associated read buffers,
1483                  * so go ahead and do that here for non-data CDBs.  Also
1484                  * note that this is currently guaranteed to be a single SGL
1485                  * for this case by target core in target_setup_cmd_from_cdb()
1486                  * -> transport_generic_cmd_sequencer().
1487                  */
1488                 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1489                      se_cmd->data_direction == DMA_FROM_DEVICE) {
1490                         unsigned char *buf = NULL;
1491
1492                         if (sgl)
1493                                 buf = kmap(sg_page(sgl)) + sgl->offset;
1494
1495                         if (buf) {
1496                                 memset(buf, 0, sgl->length);
1497                                 kunmap(sg_page(sgl));
1498                         }
1499                 }
1500
1501                 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1502                                 sgl_bidi, sgl_bidi_count);
1503                 if (rc != 0) {
1504                         transport_generic_request_failure(se_cmd, rc);
1505                         return 0;
1506                 }
1507         }
1508
1509         /*
1510          * Check if we need to delay processing because of ALUA
1511          * Active/NonOptimized primary access state.
1512          */
1513         core_alua_check_nonop_delay(se_cmd);
1514
1515         transport_handle_cdb_direct(se_cmd);
1516         return 0;
1517 }
1518 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
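/*
 * Example (sketch): how a fabric driver with pre-allocated SGLs might
 * submit a command.  "fcmd", its members, and example_fabric_fail_cmd()
 * are hypothetical fabric-private names; only the
 * target_submit_cmd_map_sgls() call and its flags are from this file.
 *
 *	se_cmd->tag = fcmd->tag;
 *	rc = target_submit_cmd_map_sgls(se_cmd, se_sess, fcmd->cdb,
 *			fcmd->sense_buf, fcmd->unpacked_lun, fcmd->data_length,
 *			TCM_SIMPLE_TAG, fcmd->dma_dir, TARGET_SCF_ACK_KREF,
 *			fcmd->sgl, fcmd->sgl_count, NULL, 0, NULL, 0);
 *	if (rc < 0)
 *		example_fabric_fail_cmd(fcmd);	(session shutdown in progress)
 */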
1519
1520 /**
1521  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1522  *
1523  * @se_cmd: command descriptor to submit
1524  * @se_sess: associated se_sess for endpoint
1525  * @cdb: pointer to SCSI CDB
1526  * @sense: pointer to SCSI sense buffer
1527  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1528  * @data_length: fabric expected data transfer length
1529  * @task_attr: SAM task attribute
1530  * @data_dir: DMA data direction
1531  * @flags: flags for command submission from target_sc_flags_tables
1532  *
1533  * Task tags are supported if the caller has set @se_cmd->tag.
1534  *
1535  * Returns non-zero to signal active I/O shutdown failure.  All other
1536  * setup exceptions are reported as a SCSI CHECK_CONDITION response,
1537  * while zero is returned here.
1538  *
1539  * This may only be called from process context, and also currently
1540  * assumes internal allocation of fabric payload buffer by target-core.
1541  *
1542  * It also assumes internal target core SGL memory allocation.
1543  */
1544 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1545                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1546                 u32 data_length, int task_attr, int data_dir, int flags)
1547 {
1548         return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1549                         unpacked_lun, data_length, task_attr, data_dir,
1550                         flags, NULL, 0, NULL, 0, NULL, 0);
1551 }
1552 EXPORT_SYMBOL(target_submit_cmd);
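/*
 * Example (sketch): the common case where target-core allocates the data
 * SGLs internally.  "fcmd" is the same hypothetical fabric-private
 * descriptor as in the sketch above.
 *
 *	se_cmd->tag = fcmd->tag;
 *	rc = target_submit_cmd(se_cmd, se_sess, fcmd->cdb, fcmd->sense_buf,
 *			fcmd->unpacked_lun, fcmd->data_length, TCM_SIMPLE_TAG,
 *			fcmd->dma_dir, TARGET_SCF_ACK_KREF);
 */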
1553
1554 static void target_complete_tmr_failure(struct work_struct *work)
1555 {
1556         struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1557
1558         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1559         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1560
1561         transport_cmd_check_stop_to_fabric(se_cmd);
1562 }
1563
1564 /**
1565  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1566  *                     for TMR CDBs
1567  *
1568  * @se_cmd: command descriptor to submit
1569  * @se_sess: associated se_sess for endpoint
1570  * @sense: pointer to SCSI sense buffer
1571  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1572  * @fabric_tmr_ptr: fabric context for TMR req
1573  * @tm_type: Type of TM request
1574  * @gfp: gfp type for caller
1575  * @tag: referenced task tag for TMR_ABORT_TASK
1576  * @flags: submit cmd flags
1577  *
1578  * Callable from all contexts.
1579  **/
1580
1581 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1582                 unsigned char *sense, u64 unpacked_lun,
1583                 void *fabric_tmr_ptr, unsigned char tm_type,
1584                 gfp_t gfp, unsigned int tag, int flags)
1585 {
1586         struct se_portal_group *se_tpg;
1587         int ret;
1588
1589         se_tpg = se_sess->se_tpg;
1590         BUG_ON(!se_tpg);
1591
1592         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1593                               0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1594         /*
1595          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1596          * allocation failure.
1597          */
1598         ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1599         if (ret < 0)
1600                 return -ENOMEM;
1601
1602         if (tm_type == TMR_ABORT_TASK)
1603                 se_cmd->se_tmr_req->ref_task_tag = tag;
1604
1605         /* See target_submit_cmd for commentary */
1606         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1607         if (ret) {
1608                 core_tmr_release_req(se_cmd->se_tmr_req);
1609                 return ret;
1610         }
1611
1612         ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1613         if (ret) {
1614                 /*
1615                  * For callback during failure handling, push this work off
1616                  * to process context with TMR_LUN_DOES_NOT_EXIST status.
1617                  */
1618                 INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1619                 schedule_work(&se_cmd->work);
1620                 return 0;
1621         }
1622         transport_generic_handle_tmr(se_cmd);
1623         return 0;
1624 }
1625 EXPORT_SYMBOL(target_submit_tmr);
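/*
 * Example (sketch): submitting an ABORT_TASK TMR.  "ref_tag" is assumed
 * to be the tag of the command to abort; a negative return means the TMR
 * was never submitted and the caller must release its own fabric
 * descriptor.
 *
 *	rc = target_submit_tmr(se_cmd, se_sess, NULL, unpacked_lun,
 *			NULL, TMR_ABORT_TASK, GFP_KERNEL, ref_tag,
 *			TARGET_SCF_ACK_KREF);
 */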
1626
1627 /*
1628  * If the cmd is active, request it to be stopped and sleep until it
1629  * has completed.
1630  */
1631 bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1632         __releases(&cmd->t_state_lock)
1633         __acquires(&cmd->t_state_lock)
1634 {
1635         bool was_active = false;
1636
1637         if (cmd->transport_state & CMD_T_BUSY) {
1638                 cmd->transport_state |= CMD_T_REQUEST_STOP;
1639                 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1640
1641                 pr_debug("cmd %p waiting to complete\n", cmd);
1642                 wait_for_completion(&cmd->task_stop_comp);
1643                 pr_debug("cmd %p stopped successfully\n", cmd);
1644
1645                 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1646                 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1647                 cmd->transport_state &= ~CMD_T_BUSY;
1648                 was_active = true;
1649         }
1650
1651         return was_active;
1652 }
1653
1654 /*
1655  * Handle SAM-esque emulation for generic transport request failures.
1656  */
1657 void transport_generic_request_failure(struct se_cmd *cmd,
1658                 sense_reason_t sense_reason)
1659 {
1660         int ret = 0, post_ret = 0;
1661
1662         pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
1663                 " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
1664         pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
1665                 cmd->se_tfo->get_cmd_state(cmd),
1666                 cmd->t_state, sense_reason);
1667         pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
1668                 (cmd->transport_state & CMD_T_ACTIVE) != 0,
1669                 (cmd->transport_state & CMD_T_STOP) != 0,
1670                 (cmd->transport_state & CMD_T_SENT) != 0);
1671
1672         /*
1673          * Handle SAM Task Attribute emulation for the failed struct se_cmd
1674          */
1675         transport_complete_task_attr(cmd);
1676         /*
1677          * Handle special case for COMPARE_AND_WRITE failure, where the
1678          * callback is expected to drop the per device ->caw_sem.
1679          */
1680         if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
1681              cmd->transport_complete_callback)
1682                 cmd->transport_complete_callback(cmd, false, &post_ret);
1683
1684         switch (sense_reason) {
1685         case TCM_NON_EXISTENT_LUN:
1686         case TCM_UNSUPPORTED_SCSI_OPCODE:
1687         case TCM_INVALID_CDB_FIELD:
1688         case TCM_INVALID_PARAMETER_LIST:
1689         case TCM_PARAMETER_LIST_LENGTH_ERROR:
1690         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1691         case TCM_UNKNOWN_MODE_PAGE:
1692         case TCM_WRITE_PROTECTED:
1693         case TCM_ADDRESS_OUT_OF_RANGE:
1694         case TCM_CHECK_CONDITION_ABORT_CMD:
1695         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1696         case TCM_CHECK_CONDITION_NOT_READY:
1697         case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1698         case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1699         case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1700                 break;
1701         case TCM_OUT_OF_RESOURCES:
1702                 sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1703                 break;
1704         case TCM_RESERVATION_CONFLICT:
1705                 /*
1706                  * No SENSE Data payload for this case, set SCSI Status
1707                  * and queue the response to $FABRIC_MOD.
1708                  *
1709                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1710                  */
1711                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1712                 /*
1713                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1714                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1715                  * CONFLICT STATUS.
1716                  *
1717                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1718                  */
1719                 if (cmd->se_sess &&
1720                     cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == 2) {
1721                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1722                                                cmd->orig_fe_lun, 0x2C,
1723                                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1724                 }
1725                 trace_target_cmd_complete(cmd);
1726                 ret = cmd->se_tfo->queue_status(cmd);
1727                 if (ret == -EAGAIN || ret == -ENOMEM)
1728                         goto queue_full;
1729                 goto check_stop;
1730         default:
1731                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1732                         cmd->t_task_cdb[0], sense_reason);
1733                 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1734                 break;
1735         }
1736
1737         ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1738         if (ret == -EAGAIN || ret == -ENOMEM)
1739                 goto queue_full;
1740
1741 check_stop:
1742         transport_lun_remove_cmd(cmd);
1743         transport_cmd_check_stop_to_fabric(cmd);
1744         return;
1745
1746 queue_full:
1747         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
1748         transport_handle_queue_full(cmd, cmd->se_dev);
1749 }
1750 EXPORT_SYMBOL(transport_generic_request_failure);
1751
1752 void __target_execute_cmd(struct se_cmd *cmd)
1753 {
1754         sense_reason_t ret;
1755
1756         if (cmd->execute_cmd) {
1757                 ret = cmd->execute_cmd(cmd);
1758                 if (ret) {
1759                         spin_lock_irq(&cmd->t_state_lock);
1760                         cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1761                         spin_unlock_irq(&cmd->t_state_lock);
1762
1763                         transport_generic_request_failure(cmd, ret);
1764                 }
1765         }
1766 }
1767
1768 static int target_write_prot_action(struct se_cmd *cmd)
1769 {
1770         u32 sectors;
1771         /*
1772          * Perform WRITE_INSERT of PI using software emulation when backend
1773          * device has PI enabled, if the transport has not already generated
1774          * PI using hardware WRITE_INSERT offload.
1775          */
1776         switch (cmd->prot_op) {
1777         case TARGET_PROT_DOUT_INSERT:
1778                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1779                         sbc_dif_generate(cmd);
1780                 break;
1781         case TARGET_PROT_DOUT_STRIP:
1782                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1783                         break;
1784
1785                 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1786                 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1787                                              sectors, 0, cmd->t_prot_sg, 0);
1788                 if (unlikely(cmd->pi_err)) {
1789                         spin_lock_irq(&cmd->t_state_lock);
1790                         cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
1791                         spin_unlock_irq(&cmd->t_state_lock);
1792                         transport_generic_request_failure(cmd, cmd->pi_err);
1793                         return -1;
1794                 }
1795                 break;
1796         default:
1797                 break;
1798         }
1799
1800         return 0;
1801 }
1802
1803 static bool target_handle_task_attr(struct se_cmd *cmd)
1804 {
1805         struct se_device *dev = cmd->se_dev;
1806
1807         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1808                 return false;
1809
1810         /*
1811          * Check for the existence of HEAD_OF_QUEUE: if set, return false so
1812          * the command is executed immediately, ahead of any queued commands.
1813          */
1814         switch (cmd->sam_task_attr) {
1815         case TCM_HEAD_TAG:
1816                 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
1817                          cmd->t_task_cdb[0]);
1818                 return false;
1819         case TCM_ORDERED_TAG:
1820                 atomic_inc_mb(&dev->dev_ordered_sync);
1821
1822                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
1823                          cmd->t_task_cdb[0]);
1824
1825                 /*
1826                  * Execute an ORDERED command if no other older commands
1827                  * exist that need to be completed first.
1828                  */
1829                 if (!atomic_read(&dev->simple_cmds))
1830                         return false;
1831                 break;
1832         default:
1833                 /*
1834                  * For SIMPLE and UNTAGGED Task Attribute commands
1835                  */
1836                 atomic_inc_mb(&dev->simple_cmds);
1837                 break;
1838         }
1839
1840         if (atomic_read(&dev->dev_ordered_sync) == 0)
1841                 return false;
1842
1843         spin_lock(&dev->delayed_cmd_lock);
1844         list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
1845         spin_unlock(&dev->delayed_cmd_lock);
1846
1847         pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
1848                 cmd->t_task_cdb[0], cmd->sam_task_attr);
1849         return true;
1850 }
1851
1852 static int __transport_check_aborted_status(struct se_cmd *, int);
1853
1854 void target_execute_cmd(struct se_cmd *cmd)
1855 {
1856         /*
1857          * Determine if frontend context caller is requesting the stopping of
1858          * this command for frontend exceptions.
1859          *
1860          * If the received CDB has already been aborted, stop processing it here.
1861          */
1862         spin_lock_irq(&cmd->t_state_lock);
1863         if (__transport_check_aborted_status(cmd, 1)) {
1864                 spin_unlock_irq(&cmd->t_state_lock);
1865                 return;
1866         }
1867         if (cmd->transport_state & CMD_T_STOP) {
1868                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1869                         __func__, __LINE__, cmd->tag);
1870
1871                 spin_unlock_irq(&cmd->t_state_lock);
1872                 complete_all(&cmd->t_transport_stop_comp);
1873                 return;
1874         }
1875
1876         cmd->t_state = TRANSPORT_PROCESSING;
1877         cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
1878         spin_unlock_irq(&cmd->t_state_lock);
1879
1880         if (target_write_prot_action(cmd))
1881                 return;
1882
1883         if (target_handle_task_attr(cmd)) {
1884                 spin_lock_irq(&cmd->t_state_lock);
1885                 cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
1886                 spin_unlock_irq(&cmd->t_state_lock);
1887                 return;
1888         }
1889
1890         __target_execute_cmd(cmd);
1891 }
1892 EXPORT_SYMBOL(target_execute_cmd);
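/*
 * Note (sketch): for WRITEs, a fabric driver calls target_execute_cmd()
 * from its ->write_pending() completion path once all solicited data has
 * been received from the initiator:
 *
 *	target_execute_cmd(se_cmd);
 */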
1893
1894 /*
1895  * Process all commands up to the last received ORDERED task attribute which
1896  * requires another blocking boundary
1897  */
1898 static void target_restart_delayed_cmds(struct se_device *dev)
1899 {
1900         for (;;) {
1901                 struct se_cmd *cmd;
1902
1903                 spin_lock(&dev->delayed_cmd_lock);
1904                 if (list_empty(&dev->delayed_cmd_list)) {
1905                         spin_unlock(&dev->delayed_cmd_lock);
1906                         break;
1907                 }
1908
1909                 cmd = list_entry(dev->delayed_cmd_list.next,
1910                                  struct se_cmd, se_delayed_node);
1911                 list_del(&cmd->se_delayed_node);
1912                 spin_unlock(&dev->delayed_cmd_lock);
1913
1914                 __target_execute_cmd(cmd);
1915
1916                 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
1917                         break;
1918         }
1919 }
1920
1921 /*
1922  * Called from I/O completion to determine which dormant/delayed
1923  * and ordered cmds need to have their tasks added to the execution queue.
1924  */
1925 static void transport_complete_task_attr(struct se_cmd *cmd)
1926 {
1927         struct se_device *dev = cmd->se_dev;
1928
1929         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1930                 return;
1931
1932         if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
1933                 atomic_dec_mb(&dev->simple_cmds);
1934                 dev->dev_cur_ordered_id++;
1935                 pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
1936                          dev->dev_cur_ordered_id);
1937         } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
1938                 dev->dev_cur_ordered_id++;
1939                 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
1940                          dev->dev_cur_ordered_id);
1941         } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
1942                 atomic_dec_mb(&dev->dev_ordered_sync);
1943
1944                 dev->dev_cur_ordered_id++;
1945                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
1946                          dev->dev_cur_ordered_id);
1947         }
1948
1949         target_restart_delayed_cmds(dev);
1950 }
1951
1952 static void transport_complete_qf(struct se_cmd *cmd)
1953 {
1954         int ret = 0;
1955
1956         transport_complete_task_attr(cmd);
1957
1958         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1959                 trace_target_cmd_complete(cmd);
1960                 ret = cmd->se_tfo->queue_status(cmd);
1961                 goto out;
1962         }
1963
1964         switch (cmd->data_direction) {
1965         case DMA_FROM_DEVICE:
1966                 trace_target_cmd_complete(cmd);
1967                 ret = cmd->se_tfo->queue_data_in(cmd);
1968                 break;
1969         case DMA_TO_DEVICE:
1970                 if (cmd->se_cmd_flags & SCF_BIDI) {
1971                         ret = cmd->se_tfo->queue_data_in(cmd);
1972                         break;
1973                 }
1974                 /* Fall through for DMA_TO_DEVICE */
1975         case DMA_NONE:
1976                 trace_target_cmd_complete(cmd);
1977                 ret = cmd->se_tfo->queue_status(cmd);
1978                 break;
1979         default:
1980                 break;
1981         }
1982
1983 out:
1984         if (ret < 0) {
1985                 transport_handle_queue_full(cmd, cmd->se_dev);
1986                 return;
1987         }
1988         transport_lun_remove_cmd(cmd);
1989         transport_cmd_check_stop_to_fabric(cmd);
1990 }
1991
1992 static void transport_handle_queue_full(
1993         struct se_cmd *cmd,
1994         struct se_device *dev)
1995 {
1996         spin_lock_irq(&dev->qf_cmd_lock);
1997         list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
1998         atomic_inc_mb(&dev->dev_qf_count);
1999         spin_unlock_irq(&dev->qf_cmd_lock);
2000
2001         schedule_work(&dev->qf_work_queue);
2002 }
2003
2004 static bool target_read_prot_action(struct se_cmd *cmd)
2005 {
2006         switch (cmd->prot_op) {
2007         case TARGET_PROT_DIN_STRIP:
2008                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2009                         u32 sectors = cmd->data_length >>
2010                                   ilog2(cmd->se_dev->dev_attrib.block_size);
2011
2012                         cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2013                                                      sectors, 0, cmd->t_prot_sg,
2014                                                      0);
2015                         if (cmd->pi_err)
2016                                 return true;
2017                 }
2018                 break;
2019         case TARGET_PROT_DIN_INSERT:
2020                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2021                         break;
2022
2023                 sbc_dif_generate(cmd);
2024                 break;
2025         default:
2026                 break;
2027         }
2028
2029         return false;
2030 }
2031
2032 static void target_complete_ok_work(struct work_struct *work)
2033 {
2034         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2035         int ret;
2036
2037         /*
2038          * Check if we need to move delayed/dormant tasks from cmds on the
2039          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2040          * Attribute.
2041          */
2042         transport_complete_task_attr(cmd);
2043
2044         /*
2045          * Check to schedule QUEUE_FULL work, or execute an existing
2046          * cmd->transport_qf_callback()
2047          */
2048         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2049                 schedule_work(&cmd->se_dev->qf_work_queue);
2050
2051         /*
2052          * Check if we need to send a sense buffer from
2053          * the struct se_cmd in question.
2054          */
2055         if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2056                 WARN_ON(!cmd->scsi_status);
2057                 ret = transport_send_check_condition_and_sense(
2058                                         cmd, 0, 1);
2059                 if (ret == -EAGAIN || ret == -ENOMEM)
2060                         goto queue_full;
2061
2062                 transport_lun_remove_cmd(cmd);
2063                 transport_cmd_check_stop_to_fabric(cmd);
2064                 return;
2065         }
2066         /*
2067          * Check for a callback, used by, amongst other things,
2068          * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2069          */
2070         if (cmd->transport_complete_callback) {
2071                 sense_reason_t rc;
2072                 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2073                 bool zero_dl = !(cmd->data_length);
2074                 int post_ret = 0;
2075
2076                 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2077                 if (!rc && !post_ret) {
2078                         if (caw && zero_dl)
2079                                 goto queue_rsp;
2080
2081                         return;
2082                 } else if (rc) {
2083                         ret = transport_send_check_condition_and_sense(cmd,
2084                                                 rc, 0);
2085                         if (ret == -EAGAIN || ret == -ENOMEM)
2086                                 goto queue_full;
2087
2088                         transport_lun_remove_cmd(cmd);
2089                         transport_cmd_check_stop_to_fabric(cmd);
2090                         return;
2091                 }
2092         }
2093
2094 queue_rsp:
2095         switch (cmd->data_direction) {
2096         case DMA_FROM_DEVICE:
2097                 atomic_long_add(cmd->data_length,
2098                                 &cmd->se_lun->lun_stats.tx_data_octets);
2099                 /*
2100                  * Perform READ_STRIP of PI using software emulation when
2101          * backend has PI enabled, if the transport will not be
2102                  * performing hardware READ_STRIP offload.
2103                  */
2104                 if (target_read_prot_action(cmd)) {
2105                         ret = transport_send_check_condition_and_sense(cmd,
2106                                                 cmd->pi_err, 0);
2107                         if (ret == -EAGAIN || ret == -ENOMEM)
2108                                 goto queue_full;
2109
2110                         transport_lun_remove_cmd(cmd);
2111                         transport_cmd_check_stop_to_fabric(cmd);
2112                         return;
2113                 }
2114
2115                 trace_target_cmd_complete(cmd);
2116                 ret = cmd->se_tfo->queue_data_in(cmd);
2117                 if (ret == -EAGAIN || ret == -ENOMEM)
2118                         goto queue_full;
2119                 break;
2120         case DMA_TO_DEVICE:
2121                 atomic_long_add(cmd->data_length,
2122                                 &cmd->se_lun->lun_stats.rx_data_octets);
2123                 /*
2124                  * Check if we need to send READ payload for BIDI-COMMAND
2125                  */
2126                 if (cmd->se_cmd_flags & SCF_BIDI) {
2127                         atomic_long_add(cmd->data_length,
2128                                         &cmd->se_lun->lun_stats.tx_data_octets);
2129                         ret = cmd->se_tfo->queue_data_in(cmd);
2130                         if (ret == -EAGAIN || ret == -ENOMEM)
2131                                 goto queue_full;
2132                         break;
2133                 }
2134                 /* Fall through for DMA_TO_DEVICE */
2135         case DMA_NONE:
2136                 trace_target_cmd_complete(cmd);
2137                 ret = cmd->se_tfo->queue_status(cmd);
2138                 if (ret == -EAGAIN || ret == -ENOMEM)
2139                         goto queue_full;
2140                 break;
2141         default:
2142                 break;
2143         }
2144
2145         transport_lun_remove_cmd(cmd);
2146         transport_cmd_check_stop_to_fabric(cmd);
2147         return;
2148
2149 queue_full:
2150         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2151                 " data_direction: %d\n", cmd, cmd->data_direction);
2152         cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
2153         transport_handle_queue_full(cmd, cmd->se_dev);
2154 }
2155
2156 static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
2157 {
2158         struct scatterlist *sg;
2159         int count;
2160
2161         for_each_sg(sgl, sg, nents, count)
2162                 __free_page(sg_page(sg));
2163
2164         kfree(sgl);
2165 }
2166
2167 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2168 {
2169         /*
2170          * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2171          * emulation, and free + reset pointers if necessary.
2172          */
2173         if (!cmd->t_data_sg_orig)
2174                 return;
2175
2176         kfree(cmd->t_data_sg);
2177         cmd->t_data_sg = cmd->t_data_sg_orig;
2178         cmd->t_data_sg_orig = NULL;
2179         cmd->t_data_nents = cmd->t_data_nents_orig;
2180         cmd->t_data_nents_orig = 0;
2181 }
2182
2183 static inline void transport_free_pages(struct se_cmd *cmd)
2184 {
2185         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2186                 transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2187                 cmd->t_prot_sg = NULL;
2188                 cmd->t_prot_nents = 0;
2189         }
2190
2191         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2192                 /*
2193                  * Release special case READ buffer payload required for
2194                  * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2195                  */
2196                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2197                         transport_free_sgl(cmd->t_bidi_data_sg,
2198                                            cmd->t_bidi_data_nents);
2199                         cmd->t_bidi_data_sg = NULL;
2200                         cmd->t_bidi_data_nents = 0;
2201                 }
2202                 transport_reset_sgl_orig(cmd);
2203                 return;
2204         }
2205         transport_reset_sgl_orig(cmd);
2206
2207         transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2208         cmd->t_data_sg = NULL;
2209         cmd->t_data_nents = 0;
2210
2211         transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2212         cmd->t_bidi_data_sg = NULL;
2213         cmd->t_bidi_data_nents = 0;
2214 }
2215
2216 /**
2217  * transport_put_cmd - release a reference to a command
2218  * @cmd:       command to release
2219  *
2220  * This routine releases our reference to the command and frees it if possible.
2221  */
2222 static int transport_put_cmd(struct se_cmd *cmd)
2223 {
2224         BUG_ON(!cmd->se_tfo);
2225         /*
2226          * If this cmd has been setup with target_get_sess_cmd(), drop
2227          * the kref and call ->release_cmd() in kref callback.
2228          */
2229         return target_put_sess_cmd(cmd);
2230 }
2231
2232 void *transport_kmap_data_sg(struct se_cmd *cmd)
2233 {
2234         struct scatterlist *sg = cmd->t_data_sg;
2235         struct page **pages;
2236         int i;
2237
2238         /*
2239          * We need to take into account a possible offset here for fabrics like
2240          * tcm_loop that may be using a contiguous buffer from the SCSI midlayer for
2241          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2242          */
2243         if (!cmd->t_data_nents)
2244                 return NULL;
2245
2246         BUG_ON(!sg);
2247         if (cmd->t_data_nents == 1)
2248                 return kmap(sg_page(sg)) + sg->offset;
2249
2250         /* >1 page. use vmap */
2251         pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
2252         if (!pages)
2253                 return NULL;
2254
2255         /* convert sg[] to pages[] */
2256         for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2257                 pages[i] = sg_page(sg);
2258         }
2259
2260         cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2261         kfree(pages);
2262         if (!cmd->t_data_vmap)
2263                 return NULL;
2264
2265         return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2266 }
2267 EXPORT_SYMBOL(transport_kmap_data_sg);
2268
2269 void transport_kunmap_data_sg(struct se_cmd *cmd)
2270 {
2271         if (!cmd->t_data_nents) {
2272                 return;
2273         } else if (cmd->t_data_nents == 1) {
2274                 kunmap(sg_page(cmd->t_data_sg));
2275                 return;
2276         }
2277
2278         vunmap(cmd->t_data_vmap);
2279         cmd->t_data_vmap = NULL;
2280 }
2281 EXPORT_SYMBOL(transport_kunmap_data_sg);
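/*
 * Example (sketch): emulation code typically pairs the two helpers above
 * to get a linear view of the payload.  "some_response_byte" is a
 * placeholder for whatever the caller writes:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	buf[0] = some_response_byte;	(fill in the response payload)
 *	transport_kunmap_data_sg(cmd);
 */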
2282
2283 int
2284 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2285                  bool zero_page)
2286 {
2287         struct scatterlist *sg;
2288         struct page *page;
2289         gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
2290         unsigned int nent;
2291         int i = 0;
2292
2293         nent = DIV_ROUND_UP(length, PAGE_SIZE);
2294         sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
2295         if (!sg)
2296                 return -ENOMEM;
2297
2298         sg_init_table(sg, nent);
2299
2300         while (length) {
2301                 u32 page_len = min_t(u32, length, PAGE_SIZE);
2302                 page = alloc_page(GFP_KERNEL | zero_flag);
2303                 if (!page)
2304                         goto out;
2305
2306                 sg_set_page(&sg[i], page, page_len, 0);
2307                 length -= page_len;
2308                 i++;
2309         }
2310         *sgl = sg;
2311         *nents = nent;
2312         return 0;
2313
2314 out:
2315         while (i > 0) {
2316                 i--;
2317                 __free_page(sg_page(&sg[i]));
2318         }
2319         kfree(sg);
2320         return -ENOMEM;
2321 }
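/*
 * Example (sketch): in-core callers such as the XCOPY code allocate a
 * scratch SGL with target_alloc_sgl() and release it page by page when
 * done, e.g.:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents, i;
 *
 *	if (target_alloc_sgl(&sgl, &nents, length, true) < 0)
 *		return -ENOMEM;
 *	(... perform I/O through sgl/nents ...)
 *	for (i = 0; i < nents; i++)
 *		__free_page(sg_page(&sgl[i]));
 *	kfree(sgl);
 */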
2322
2323 /*
2324  * Allocate any required resources to execute the command.  For writes we
2325  * might not have the payload yet, so notify the fabric via a call to
2326  * ->write_pending instead. Otherwise place it on the execution queue.
2327  */
2328 sense_reason_t
2329 transport_generic_new_cmd(struct se_cmd *cmd)
2330 {
2331         int ret = 0;
2332         bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2333
2334         if (cmd->prot_op != TARGET_PROT_NORMAL &&
2335             !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2336                 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2337                                        cmd->prot_length, true);
2338                 if (ret < 0)
2339                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2340         }
2341
2342         /*
2343          * Determine if the TCM fabric module has already allocated physical
2344          * memory, and is directly calling transport_generic_map_mem_to_cmd()
2345          * beforehand.
2346          */
2347         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2348             cmd->data_length) {
2349
2350                 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2351                     (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2352                         u32 bidi_length;
2353
2354                         if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2355                                 bidi_length = cmd->t_task_nolb *
2356                                               cmd->se_dev->dev_attrib.block_size;
2357                         else
2358                                 bidi_length = cmd->data_length;
2359
2360                         ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2361                                                &cmd->t_bidi_data_nents,
2362                                                bidi_length, zero_flag);
2363                         if (ret < 0)
2364                                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2365                 }
2366
2367                 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2368                                        cmd->data_length, zero_flag);
2369                 if (ret < 0)
2370                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2371         } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2372                     cmd->data_length) {
2373                 /*
2374                  * Special case for COMPARE_AND_WRITE with fabrics
2375                  * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2376                  */
2377                 u32 caw_length = cmd->t_task_nolb *
2378                                  cmd->se_dev->dev_attrib.block_size;
2379
2380                 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2381                                        &cmd->t_bidi_data_nents,
2382                                        caw_length, zero_flag);
2383                 if (ret < 0)
2384                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2385         }
2386         /*
2387          * If this command is not a write we can execute it right here,
2388          * for write buffers we need to notify the fabric driver first
2389          * and let it call back once the write buffers are ready.
2390          */
2391         target_add_to_state_list(cmd);
2392         if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2393                 target_execute_cmd(cmd);
2394                 return 0;
2395         }
2396         transport_cmd_check_stop(cmd, false, true);
2397
2398         ret = cmd->se_tfo->write_pending(cmd);
2399         if (ret == -EAGAIN || ret == -ENOMEM)
2400                 goto queue_full;
2401
2402         /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
2403         WARN_ON(ret);
2404
2405         return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2406
2407 queue_full:
2408         pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
2409         cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
2410         transport_handle_queue_full(cmd, cmd->se_dev);
2411         return 0;
2412 }
2413 EXPORT_SYMBOL(transport_generic_new_cmd);
2414
2415 static void transport_write_pending_qf(struct se_cmd *cmd)
2416 {
2417         int ret;
2418
2419         ret = cmd->se_tfo->write_pending(cmd);
2420         if (ret == -EAGAIN || ret == -ENOMEM) {
2421                 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
2422                          cmd);
2423                 transport_handle_queue_full(cmd, cmd->se_dev);
2424         }
2425 }
2426
2427 static bool
2428 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2429                            unsigned long *flags);
2430
2431 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2432 {
2433         unsigned long flags;
2434
2435         spin_lock_irqsave(&cmd->t_state_lock, flags);
2436         __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2437         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2438 }
2439
2440 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2441 {
2442         int ret = 0;
2443         bool aborted = false, tas = false;
2444
2445         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2446                 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2447                         target_wait_free_cmd(cmd, &aborted, &tas);
2448
2449                 if (!aborted || tas)
2450                         ret = transport_put_cmd(cmd);
2451         } else {
2452                 if (wait_for_tasks)
2453                         target_wait_free_cmd(cmd, &aborted, &tas);
2454                 /*
2455                  * Handle WRITE failure case where transport_generic_new_cmd()
2456                  * has already added se_cmd to state_list, but fabric has
2457                  * failed command before I/O submission.
2458                  */
2459                 if (cmd->state_active)
2460                         target_remove_from_state_list(cmd);
2461
2462                 if (cmd->se_lun)
2463                         transport_lun_remove_cmd(cmd);
2464
2465                 if (!aborted || tas)
2466                         ret = transport_put_cmd(cmd);
2467         }
2468         /*
2469          * If the task has been internally aborted due to TMR ABORT_TASK
2470          * or LUN_RESET, target_core_tmr.c is responsible for performing
2471          * the remaining calls to target_put_sess_cmd(), and not the
2472          * callers of this function.
2473          */
2474         if (aborted) {
2475                 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2476                 wait_for_completion(&cmd->cmd_wait_comp);
2477                 cmd->se_tfo->release_cmd(cmd);
2478                 ret = 1;
2479         }
2480         return ret;
2481 }
2482 EXPORT_SYMBOL(transport_generic_free_cmd);
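/*
 * Note (sketch): a fabric's response-completion handler typically drops
 * its descriptor reference with
 *
 *	transport_generic_free_cmd(se_cmd, 0);
 *
 * and, when TARGET_SCF_ACK_KREF was used at submission, issues one more
 * target_put_sess_cmd() after the initiator acknowledges the response.
 */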
2483
2484 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
2485  * @se_cmd:     command descriptor to add
2486  * @ack_kref:   Signal that fabric will perform an ack target_put_sess_cmd()
2487  */
2488 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2489 {
2490         struct se_session *se_sess = se_cmd->se_sess;
2491         unsigned long flags;
2492         int ret = 0;
2493
2494         /*
2495          * Add a second kref if the fabric caller is expecting to handle
2496          * fabric acknowledgement that requires two target_put_sess_cmd()
2497          * invocations before se_cmd descriptor release.
2498          */
2499         if (ack_kref)
2500                 kref_get(&se_cmd->cmd_kref);
2501
2502         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2503         if (se_sess->sess_tearing_down) {
2504                 ret = -ESHUTDOWN;
2505                 goto out;
2506         }
2507         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2508 out:
2509         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2510
2511         if (ret && ack_kref)
2512                 target_put_sess_cmd(se_cmd);
2513
2514         return ret;
2515 }
2516 EXPORT_SYMBOL(target_get_sess_cmd);
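/*
 * Note (sketch, assuming cmd_kref is at its initial value of one): with
 * ack_kref the descriptor ends up holding two references, so release
 * requires two puts:
 *
 *	target_get_sess_cmd(se_cmd, true);
 *	...
 *	target_put_sess_cmd(se_cmd);	(response sent to fabric)
 *	target_put_sess_cmd(se_cmd);	(initiator acknowledged response)
 */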
2517
2518 static void target_free_cmd_mem(struct se_cmd *cmd)
2519 {
2520         transport_free_pages(cmd);
2521
2522         if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2523                 core_tmr_release_req(cmd->se_tmr_req);
2524         if (cmd->t_task_cdb != cmd->__t_task_cdb)
2525                 kfree(cmd->t_task_cdb);
2526 }
2527
2528 static void target_release_cmd_kref(struct kref *kref)
2529 {
2530         struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2531         struct se_session *se_sess = se_cmd->se_sess;
2532         unsigned long flags;
2533         bool fabric_stop;
2534
2535         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2536         if (list_empty(&se_cmd->se_cmd_list)) {
2537                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2538                 target_free_cmd_mem(se_cmd);
2539                 se_cmd->se_tfo->release_cmd(se_cmd);
2540                 return;
2541         }
2542
2543         spin_lock(&se_cmd->t_state_lock);
2544         fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
2545         spin_unlock(&se_cmd->t_state_lock);
2546
2547         if (se_cmd->cmd_wait_set || fabric_stop) {
2548                 list_del_init(&se_cmd->se_cmd_list);
2549                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2550                 target_free_cmd_mem(se_cmd);
2551                 complete(&se_cmd->cmd_wait_comp);
2552                 return;
2553         }
2554         list_del_init(&se_cmd->se_cmd_list);
2555         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2556
2557         target_free_cmd_mem(se_cmd);
2558         se_cmd->se_tfo->release_cmd(se_cmd);
2559 }
2560
2561 /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
2562  * @se_cmd:     command descriptor to drop
2563  */
2564 int target_put_sess_cmd(struct se_cmd *se_cmd)
2565 {
2566         struct se_session *se_sess = se_cmd->se_sess;
2567
2568         if (!se_sess) {
2569                 target_free_cmd_mem(se_cmd);
2570                 se_cmd->se_tfo->release_cmd(se_cmd);
2571                 return 1;
2572         }
2573         return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
2574 }
2575 EXPORT_SYMBOL(target_put_sess_cmd);
2576
2577 /* target_sess_cmd_list_set_waiting - Flag all commands in
2578  *         sess_cmd_list to complete cmd_wait_comp.  Set
2579  *         sess_tearing_down so no more commands are queued.
2580  * @se_sess:    session to flag
2581  */
2582 void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2583 {
2584         struct se_cmd *se_cmd;
2585         unsigned long flags;
2586         int rc;
2587
2588         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2589         if (se_sess->sess_tearing_down) {
2590                 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2591                 return;
2592         }
2593         se_sess->sess_tearing_down = 1;
2594         list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2595
2596         list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
2597                 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2598                 if (rc) {
2599                         se_cmd->cmd_wait_set = 1;
2600                         spin_lock(&se_cmd->t_state_lock);
2601                         se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2602                         spin_unlock(&se_cmd->t_state_lock);
2603                 }
2604         }
2605
2606         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2607 }
2608 EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
2609
2610 /* target_wait_for_sess_cmds - Wait for outstanding descriptors
2611  * @se_sess:    session to wait for active I/O
2612  */
2613 void target_wait_for_sess_cmds(struct se_session *se_sess)
2614 {
2615         struct se_cmd *se_cmd, *tmp_cmd;
2616         unsigned long flags;
2617         bool tas;
2618
2619         list_for_each_entry_safe(se_cmd, tmp_cmd,
2620                                 &se_sess->sess_wait_list, se_cmd_list) {
2621                 list_del_init(&se_cmd->se_cmd_list);
2622
2623                 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2624                         " %d\n", se_cmd, se_cmd->t_state,
2625                         se_cmd->se_tfo->get_cmd_state(se_cmd));
2626
2627                 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2628                 tas = (se_cmd->transport_state & CMD_T_TAS);
2629                 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2630
2631                 if (!target_put_sess_cmd(se_cmd)) {
2632                         if (tas)
2633                                 target_put_sess_cmd(se_cmd);
2634                 }
2635
2636                 wait_for_completion(&se_cmd->cmd_wait_comp);
2637                 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2638                         " fabric state: %d\n", se_cmd, se_cmd->t_state,
2639                         se_cmd->se_tfo->get_cmd_state(se_cmd));
2640
2641                 se_cmd->se_tfo->release_cmd(se_cmd);
2642         }
2643
2644         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2645         WARN_ON(!list_empty(&se_sess->sess_cmd_list));
2646         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2647
2648 }
2649 EXPORT_SYMBOL(target_wait_for_sess_cmds);
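/*
 * Example (sketch): fabric session teardown typically pairs the two
 * calls above before deregistering the session:
 *
 *	target_sess_cmd_list_set_waiting(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 */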
2650
2651 void transport_clear_lun_ref(struct se_lun *lun)
2652 {
2653         percpu_ref_kill(&lun->lun_ref);
2654         wait_for_completion(&lun->lun_ref_comp);
2655 }
2656
2657 static bool
2658 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2659                            bool *aborted, bool *tas, unsigned long *flags)
2660         __releases(&cmd->t_state_lock)
2661         __acquires(&cmd->t_state_lock)
2662 {
2663
2664         assert_spin_locked(&cmd->t_state_lock);
2665         WARN_ON_ONCE(!irqs_disabled());
2666
2667         if (fabric_stop)
2668                 cmd->transport_state |= CMD_T_FABRIC_STOP;
2669
2670         if (cmd->transport_state & CMD_T_ABORTED)
2671                 *aborted = true;
2672
2673         if (cmd->transport_state & CMD_T_TAS)
2674                 *tas = true;
2675
2676         if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2677             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2678                 return false;
2679
2680         if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2681             !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2682                 return false;
2683
2684         if (!(cmd->transport_state & CMD_T_ACTIVE))
2685                 return false;
2686
2687         if (fabric_stop && *aborted)
2688                 return false;
2689
2690         cmd->transport_state |= CMD_T_STOP;
2691
2692         pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2693                  " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2694                  cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2695
2696         spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2697
2698         wait_for_completion(&cmd->t_transport_stop_comp);
2699
2700         spin_lock_irqsave(&cmd->t_state_lock, *flags);
2701         cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2702
2703         pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2704                  "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2705
2706         return true;
2707 }
2708
2709 /**
2710  * transport_wait_for_tasks - wait for completion to occur
2711  * @cmd:        command to wait
2712  *
2713  * Called from frontend fabric context to wait for storage engine
2714  * to pause and/or release frontend generated struct se_cmd.
2715  */
2716 bool transport_wait_for_tasks(struct se_cmd *cmd)
2717 {
2718         unsigned long flags;
2719         bool ret, aborted = false, tas = false;
2720
2721         spin_lock_irqsave(&cmd->t_state_lock, flags);
2722         ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2723         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2724
2725         return ret;
2726 }
2727 EXPORT_SYMBOL(transport_wait_for_tasks);
2728
2729 struct sense_info {
2730         u8 key;
2731         u8 asc;
2732         u8 ascq;
2733         bool add_sector_info;
2734 };
2735
2736 static const struct sense_info sense_info_table[] = {
2737         [TCM_NO_SENSE] = {
2738                 .key = NOT_READY
2739         },
2740         [TCM_NON_EXISTENT_LUN] = {
2741                 .key = ILLEGAL_REQUEST,
2742                 .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
2743         },
2744         [TCM_UNSUPPORTED_SCSI_OPCODE] = {
2745                 .key = ILLEGAL_REQUEST,
2746                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2747         },
2748         [TCM_SECTOR_COUNT_TOO_MANY] = {
2749                 .key = ILLEGAL_REQUEST,
2750                 .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
2751         },
2752         [TCM_UNKNOWN_MODE_PAGE] = {
2753                 .key = ILLEGAL_REQUEST,
2754                 .asc = 0x24, /* INVALID FIELD IN CDB */
2755         },
2756         [TCM_CHECK_CONDITION_ABORT_CMD] = {
2757                 .key = ABORTED_COMMAND,
2758                 .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
2759                 .ascq = 0x03,
2760         },
2761         [TCM_INCORRECT_AMOUNT_OF_DATA] = {
2762                 .key = ABORTED_COMMAND,
2763                 .asc = 0x0c, /* WRITE ERROR */
2764                 .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
2765         },
2766         [TCM_INVALID_CDB_FIELD] = {
2767                 .key = ILLEGAL_REQUEST,
2768                 .asc = 0x24, /* INVALID FIELD IN CDB */
2769         },
2770         [TCM_INVALID_PARAMETER_LIST] = {
2771                 .key = ILLEGAL_REQUEST,
2772                 .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
2773         },
2774         [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
2775                 .key = ILLEGAL_REQUEST,
2776                 .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
2777         },
2778         [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
2779                 .key = ILLEGAL_REQUEST,
2780                 .asc = 0x0c, /* WRITE ERROR */
2781                 .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
2782         },
2783         [TCM_SERVICE_CRC_ERROR] = {
2784                 .key = ABORTED_COMMAND,
2785                 .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
2786                 .ascq = 0x05, /* N/A */
2787         },
2788         [TCM_SNACK_REJECTED] = {
2789                 .key = ABORTED_COMMAND,
2790                 .asc = 0x11, /* READ ERROR */
2791                 .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
2792         },
2793         [TCM_WRITE_PROTECTED] = {
2794                 .key = DATA_PROTECT,
2795                 .asc = 0x27, /* WRITE PROTECTED */
2796         },
2797         [TCM_ADDRESS_OUT_OF_RANGE] = {
2798                 .key = ILLEGAL_REQUEST,
2799                 .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
2800         },
2801         [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
2802                 .key = UNIT_ATTENTION,
2803         },
2804         [TCM_CHECK_CONDITION_NOT_READY] = {
2805                 .key = NOT_READY,
2806         },
2807         [TCM_MISCOMPARE_VERIFY] = {
2808                 .key = MISCOMPARE,
2809                 .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
2810                 .ascq = 0x00,
2811         },
2812         [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
2813                 .key = ABORTED_COMMAND,
2814                 .asc = 0x10,
2815                 .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
2816                 .add_sector_info = true,
2817         },
2818         [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
2819                 .key = ABORTED_COMMAND,
2820                 .asc = 0x10,
2821                 .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
2822                 .add_sector_info = true,
2823         },
2824         [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
2825                 .key = ABORTED_COMMAND,
2826                 .asc = 0x10,
2827                 .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
2828                 .add_sector_info = true,
2829         },
2830         [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
2831                 /*
2832                  * Returning ILLEGAL REQUEST would cause immediate IO errors on
2833                  * Solaris initiators.  Returning NOT READY instead means the
2834                  * operations will be retried a finite number of times and we
2835                  * can survive intermittent errors.
2836                  */
2837                 .key = NOT_READY,
2838                 .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
2839         },
2840 };
2841
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
        const struct sense_info *si;
        u8 *buffer = cmd->sense_buffer;
        int r = (__force int)reason;
        u8 asc, ascq;
        bool desc_format = target_sense_desc_format(cmd->se_dev);

        if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
                si = &sense_info_table[r];
        else
                si = &sense_info_table[(__force int)
                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

        if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
                WARN_ON_ONCE(asc == 0);
        } else if (si->asc == 0) {
                WARN_ON_ONCE(cmd->scsi_asc == 0);
                asc = cmd->scsi_asc;
                ascq = cmd->scsi_ascq;
        } else {
                asc = si->asc;
                ascq = si->ascq;
        }

        scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq);
        if (si->add_sector_info)
                return scsi_set_sense_information(buffer,
                                                  cmd->scsi_sense_length,
                                                  cmd->bad_sector);

        return 0;
}

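/*
 * Queue CHECK CONDITION status with sense data back to the fabric module.
 * The SCF_SENT_CHECK_CONDITION flag guarantees the response is emitted at
 * most once; @from_transport is non-zero when the sense payload was
 * already provided by the backend and only needs to be queued.
 *
 * A caller would typically invoke this from a command failure path,
 * roughly (a sketch; @reason is a sense_reason_t from command setup):
 *
 *	if (reason)
 *		transport_send_check_condition_and_sense(se_cmd, reason, 0);
 */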
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
                sense_reason_t reason, int from_transport)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (!from_transport) {
                int rc;

                cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
                cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
                cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
                rc = translate_sense_reason(cmd, reason);
                if (rc)
                        return rc;
        }

        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

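/*
 * Caller holds cmd->t_state_lock with interrupts disabled.  Returns 0 if
 * the command is not aborted.  Returns 1 and queues a delayed
 * SAM_STAT_TASK_ABORTED response (dropping and re-taking the lock around
 * ->queue_status()) once both an abort and a TAS request have been seen;
 * until then it only records the pending request via SCF_SEND_DELAYED_TAS.
 */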
static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
{
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());

        if (!(cmd->transport_state & CMD_T_ABORTED))
                return 0;
        /*
         * If cmd has been aborted but either no status is to be sent, or
         * this is the first request to send one, just note the pending
         * request via SCF_SEND_DELAYED_TAS and return; the status is sent
         * on a later call.
         */
        if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
                if (send_status)
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                return 1;
        }

        pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
                " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);

        cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
        trace_target_cmd_complete(cmd);

        spin_unlock_irq(&cmd->t_state_lock);
        cmd->se_tfo->queue_status(cmd);
        spin_lock_irq(&cmd->t_state_lock);

        return 1;
}

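/*
 * Locked wrapper around __transport_check_aborted_status(), used by fabric
 * modules to check for (and, if requested, deliver) a delayed TASK_ABORTED
 * status before queueing further responses for @cmd.
 */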
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
        int ret;

        spin_lock_irq(&cmd->t_state_lock);
        ret = __transport_check_aborted_status(cmd, send_status);
        spin_unlock_irq(&cmd->t_state_lock);

        return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);

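/*
 * Send SAM_STAT_TASK_ABORTED status for an aborted command, unless a
 * CHECK CONDITION response has already gone out.  For WRITEs with fabric
 * data still outstanding, delivery is deferred via SCF_SEND_DELAYED_TAS
 * and completed later by transport_check_aborted_status().
 */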
void transport_send_task_abort(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        /*
         * If there are still expected incoming fabric WRITEs, we wait
         * until they have completed before sending a TASK_ABORTED
         * response.  This response with TASK_ABORTED status will be
         * queued back to the fabric module by transport_check_aborted_status().
         */
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        spin_lock_irqsave(&cmd->t_state_lock, flags);
                        if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
                                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                                goto send_abort;
                        }
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                        return;
                }
        }
send_abort:
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;

        transport_lun_remove_cmd(cmd);

        pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
                 cmd->t_task_cdb[0], cmd->tag);

        trace_target_cmd_complete(cmd);
        cmd->se_tfo->queue_status(cmd);
}

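/*
 * Workqueue handler that executes a task management request.  The abort
 * check is done both before dispatch and again before queueing the TMR
 * response, since an ABORT may race with this handler on another CPU.
 */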
static void target_tmr_work(struct work_struct *work)
{
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                tmr->response = TMR_FUNCTION_REJECTED;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        switch (tmr->function) {
        case TMR_ABORT_TASK:
                core_tmr_abort_task(dev, tmr, cmd->se_sess);
                break;
        case TMR_ABORT_TASK_SET:
        case TMR_CLEAR_ACA:
        case TMR_CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
        case TMR_LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                if (tmr->response == TMR_FUNCTION_COMPLETE) {
                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                                               cmd->orig_fe_lun, 0x29,
                                               ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
                }
                break;
        case TMR_TARGET_WARM_RESET:
        case TMR_TARGET_COLD_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        default:
                pr_err("Unknown TMR function: 0x%02x.\n",
                                tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        }

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                goto check_stop;
        }
        cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        cmd->se_tfo->queue_tm_rsp(cmd);

check_stop:
        transport_cmd_check_stop_to_fabric(cmd);
}

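/*
 * Entry point for handing a task management request to the target core:
 * the command is marked active and the actual work runs asynchronously on
 * the device's TMR workqueue via target_tmr_work().
 *
 * A fabric caller might use it roughly as follows (a sketch; setting up
 * se_cmd and se_tmr_req via target_submit_tmr() is the more common path):
 *
 *	se_cmd->se_tmr_req->function = TMR_LUN_RESET;
 *	transport_generic_handle_tmr(se_cmd);
 */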
int transport_generic_handle_tmr(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd->transport_state |= CMD_T_ACTIVE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

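/*
 * Report whether a write cache is in effect for @dev: the backend's
 * get_write_cache() callback takes precedence when implemented, otherwise
 * fall back to the emulate_write_cache device attribute.
 */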
bool
target_check_wce(struct se_device *dev)
{
        bool wce = false;

        if (dev->transport->get_write_cache)
                wce = dev->transport->get_write_cache(dev);
        else if (dev->dev_attrib.emulate_write_cache > 0)
                wce = true;

        return wce;
}

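/*
 * FUA (Force Unit Access) is only meaningful when a write cache is in
 * effect, so advertise it only when both the cache is enabled and the
 * emulate_fua_write attribute is set.
 */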
bool
target_check_fua(struct se_device *dev)
{
        return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}