kvmfornfv.git: kernel/drivers/scsi/scsi_debug.c (base: rt linux 4.1.3-rt3)
1 /*
2  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3  *  Copyright (C) 1992  Eric Youngdale
4  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
5  *  to make sure that we are not getting blocks mixed up, and PANIC if
6  *  anything out of the ordinary is seen.
7  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
8  *
9  *  This version is more generic, simulating a variable number of disks
10  *  (or disk-like devices) sharing a common amount of RAM. To be more
11  *  realistic, the simulated devices have the transport attributes of
12  *  SAS disks.
13  *
14  *
15  *  For documentation see http://sg.danny.cz/sg/sdebug26.html
16  *
17  *   D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18  *   dpg: work for devfs large number of disks [20010809]
19  *        forked for lk 2.5 series [20011216, 20020101]
20  *        use vmalloc() more inquiry+mode_sense [20020302]
21  *        add timers for delayed responses [20020721]
22  *   Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23  *   Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24  *   dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25  *        module options to "modprobe scsi_debug num_tgts=2" [20021221]
26  */
27
28 #include <linux/module.h>
29
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
45 #include <linux/spinlock.h>
46 #include <linux/interrupt.h>
47 #include <linux/atomic.h>
48 #include <linux/hrtimer.h>
49
50 #include <net/checksum.h>
51
52 #include <asm/unaligned.h>
53
54 #include <scsi/scsi.h>
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_device.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsicam.h>
59 #include <scsi/scsi_eh.h>
60 #include <scsi/scsi_tcq.h>
61 #include <scsi/scsi_dbg.h>
62
63 #include "sd.h"
64 #include "scsi_logging.h"
65
66 #define SCSI_DEBUG_VERSION "1.85"
67 static const char *scsi_debug_version_date = "20141022";
68
69 #define MY_NAME "scsi_debug"
70
71 /* Additional Sense Code (ASC) */
72 #define NO_ADDITIONAL_SENSE 0x0
73 #define LOGICAL_UNIT_NOT_READY 0x4
74 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
75 #define UNRECOVERED_READ_ERR 0x11
76 #define PARAMETER_LIST_LENGTH_ERR 0x1a
77 #define INVALID_OPCODE 0x20
78 #define LBA_OUT_OF_RANGE 0x21
79 #define INVALID_FIELD_IN_CDB 0x24
80 #define INVALID_FIELD_IN_PARAM_LIST 0x26
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
89 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
90 #define CAPACITY_CHANGED_ASCQ 0x9
91 #define SAVING_PARAMS_UNSUP 0x39
92 #define TRANSPORT_PROBLEM 0x4b
93 #define THRESHOLD_EXCEEDED 0x5d
94 #define LOW_POWER_COND_ON 0x5e
95 #define MISCOMPARE_VERIFY_ASC 0x1d
96 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
97 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
98
99 /* Additional Sense Code Qualifier (ASCQ) */
100 #define ACK_NAK_TO 0x3
101
102
103 /* Default values for driver parameters */
104 #define DEF_NUM_HOST   1
105 #define DEF_NUM_TGTS   1
106 #define DEF_MAX_LUNS   1
107 /* With these defaults, this driver will make 1 host with 1 target
108  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
109  */
110 #define DEF_ATO 1
111 #define DEF_DELAY   1           /* if > 0 unit is a jiffy */
112 #define DEF_DEV_SIZE_MB   8
113 #define DEF_DIF 0
114 #define DEF_DIX 0
115 #define DEF_D_SENSE   0
116 #define DEF_EVERY_NTH   0
117 #define DEF_FAKE_RW     0
118 #define DEF_GUARD 0
119 #define DEF_HOST_LOCK 0
120 #define DEF_LBPU 0
121 #define DEF_LBPWS 0
122 #define DEF_LBPWS10 0
123 #define DEF_LBPRZ 1
124 #define DEF_LOWEST_ALIGNED 0
125 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
126 #define DEF_NO_LUN_0   0
127 #define DEF_NUM_PARTS   0
128 #define DEF_OPTS   0
129 #define DEF_OPT_BLKS 64
130 #define DEF_PHYSBLK_EXP 0
131 #define DEF_PTYPE   0
132 #define DEF_REMOVABLE false
133 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
134 #define DEF_SECTOR_SIZE 512
135 #define DEF_UNMAP_ALIGNMENT 0
136 #define DEF_UNMAP_GRANULARITY 1
137 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
138 #define DEF_UNMAP_MAX_DESC 256
139 #define DEF_VIRTUAL_GB   0
140 #define DEF_VPD_USE_HOSTNO 1
141 #define DEF_WRITESAME_LENGTH 0xFFFF
142 #define DEF_STRICT 0
143 #define DELAY_OVERRIDDEN -9999
144
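/*
 * Illustrative invocation (a sketch, not part of this file): loading the
 * module as
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4
 * would create one pseudo host with 2 targets of 4 LUNs each (8 devices),
 * all backed by the same 256 MB ram store.
 */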
145 /* bit mask values for scsi_debug_opts */
146 #define SCSI_DEBUG_OPT_NOISE   1
147 #define SCSI_DEBUG_OPT_MEDIUM_ERR   2
148 #define SCSI_DEBUG_OPT_TIMEOUT   4
149 #define SCSI_DEBUG_OPT_RECOVERED_ERR   8
150 #define SCSI_DEBUG_OPT_TRANSPORT_ERR   16
151 #define SCSI_DEBUG_OPT_DIF_ERR   32
152 #define SCSI_DEBUG_OPT_DIX_ERR   64
153 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
154 #define SCSI_DEBUG_OPT_SHORT_TRANSFER   0x100
155 #define SCSI_DEBUG_OPT_Q_NOISE  0x200
156 #define SCSI_DEBUG_OPT_ALL_TSF  0x400
157 #define SCSI_DEBUG_OPT_RARE_TSF 0x800
158 #define SCSI_DEBUG_OPT_N_WCE    0x1000
159 #define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
160 #define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
161 #define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
162 /* When "every_nth" > 0 then modulo "every_nth" commands:
163  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
164  *   - a RECOVERED_ERROR is simulated on successful read and write
165  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
166  *   - a TRANSPORT_ERROR is simulated on successful read and write
167  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
168  *
169  * When "every_nth" < 0 then after "- every_nth" commands:
170  *   - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
171  *   - a RECOVERED_ERROR is simulated on successful read and write
172  *     commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
173  *   - a TRANSPORT_ERROR is simulated on successful read and write
174  *     commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
175  * This will continue until some other action occurs (e.g. the user
176  * writing a new value (other than -1 or 1) to every_nth via sysfs).
177  */
178
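/*
 * Illustrative combination (a sketch of the option semantics above): with
 *   modprobe scsi_debug opts=0x8 every_nth=100
 * every 100th READ or WRITE that would otherwise succeed reports a
 * RECOVERED_ERROR (0x8 == SCSI_DEBUG_OPT_RECOVERED_ERR). Several bits may
 * be OR-ed together, e.g. opts=0x5 selects SCSI_DEBUG_OPT_NOISE plus
 * SCSI_DEBUG_OPT_TIMEOUT.
 */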
179 /* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
180  * priority order. In the subset implemented here lower numbers have higher
181  * priority. The UA numbers should be a sequence starting from 0 with
182  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
183 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
184 #define SDEBUG_UA_BUS_RESET 1
185 #define SDEBUG_UA_MODE_CHANGED 2
186 #define SDEBUG_UA_CAPACITY_CHANGED 3
187 #define SDEBUG_UA_LUNS_CHANGED 4
188 #define SDEBUG_UA_MICROCODE_CHANGED 5   /* simulate firmware change */
189 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
190 #define SDEBUG_NUM_UAS 7
191
192 /* for check_readiness() */
193 #define UAS_ONLY 1      /* check for UAs only */
194 #define UAS_TUR 0       /* if no UAs then check if media access possible */
195
196 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a
197  * medium error is simulated at this sector on read commands: */
198 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
199 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
200
201 /* For LUNs >= 256, REPORT LUNS can use either "flat space" addressing
202  * (value 1) or "peripheral device" addressing (value 0) */
203 #define SAM2_LUN_ADDRESS_METHOD 0
204 #define SAM2_WLUN_REPORT_LUNS 0xc101
205
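/*
 * Worked example of the two methods above: with flat space addressing the
 * top two bits of the first LUN byte are 01b, so LUN 258 (0x102) is encoded
 * as 0x41 0x02; peripheral device addressing of LUN 5 is simply 0x00 0x05.
 * SAM2_WLUN_REPORT_LUNS (0xc101) is the well-known REPORT LUNS w-lun.
 */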
206 /* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
207  * (for response) at one time. Can be reduced by max_queue option. Command
208  * responses are not queued when delay=0 and ndelay=0. The per-device
209  * DEF_CMD_PER_LUN can be changed via sysfs:
210  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
211  * SCSI_DEBUG_CANQUEUE. */
212 #define SCSI_DEBUG_CANQUEUE_WORDS  9    /* a "word" here is BITS_PER_LONG bits */
213 #define SCSI_DEBUG_CANQUEUE  (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
214 #define DEF_CMD_PER_LUN  255
215
216 #if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
217 #warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
218 #endif
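/*
 * Run-time example for the limits above (illustrative device address): the
 * per-device queue depth can be lowered, but not raised past
 * SCSI_DEBUG_CANQUEUE, e.g.
 *   echo 4 > /sys/class/scsi_device/0:0:0:0/device/queue_depth
 */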
219
220 /* SCSI opcodes (first byte of cdb) mapped onto these indexes */
221 enum sdeb_opcode_index {
222         SDEB_I_INVALID_OPCODE = 0,
223         SDEB_I_INQUIRY = 1,
224         SDEB_I_REPORT_LUNS = 2,
225         SDEB_I_REQUEST_SENSE = 3,
226         SDEB_I_TEST_UNIT_READY = 4,
227         SDEB_I_MODE_SENSE = 5,          /* 6, 10 */
228         SDEB_I_MODE_SELECT = 6,         /* 6, 10 */
229         SDEB_I_LOG_SENSE = 7,
230         SDEB_I_READ_CAPACITY = 8,       /* 10; 16 is in SA_IN(16) */
231         SDEB_I_READ = 9,                /* 6, 10, 12, 16 */
232         SDEB_I_WRITE = 10,              /* 6, 10, 12, 16 */
233         SDEB_I_START_STOP = 11,
234         SDEB_I_SERV_ACT_IN = 12,        /* 12, 16 */
235         SDEB_I_SERV_ACT_OUT = 13,       /* 12, 16 */
236         SDEB_I_MAINT_IN = 14,
237         SDEB_I_MAINT_OUT = 15,
238         SDEB_I_VERIFY = 16,             /* 10 only */
239         SDEB_I_VARIABLE_LEN = 17,
240         SDEB_I_RESERVE = 18,            /* 6, 10 */
241         SDEB_I_RELEASE = 19,            /* 6, 10 */
242         SDEB_I_ALLOW_REMOVAL = 20,      /* PREVENT ALLOW MEDIUM REMOVAL */
243         SDEB_I_REZERO_UNIT = 21,        /* REWIND in SSC */
244         SDEB_I_ATA_PT = 22,             /* 12, 16 */
245         SDEB_I_SEND_DIAG = 23,
246         SDEB_I_UNMAP = 24,
247         SDEB_I_XDWRITEREAD = 25,        /* 10 only */
248         SDEB_I_WRITE_BUFFER = 26,
249         SDEB_I_WRITE_SAME = 27,         /* 10, 16 */
250         SDEB_I_SYNC_CACHE = 28,         /* 10 only */
251         SDEB_I_COMP_WRITE = 29,
252         SDEB_I_LAST_ELEMENT = 30,       /* keep this last */
253 };
254
255 static const unsigned char opcode_ind_arr[256] = {
256 /* 0x0; 0x0->0x1f: 6 byte cdbs */
257         SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
258             0, 0, 0, 0,
259         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
260         0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
261             SDEB_I_RELEASE,
262         0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
263             SDEB_I_ALLOW_REMOVAL, 0,
264 /* 0x20; 0x20->0x3f: 10 byte cdbs */
265         0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
266         SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
267         0, 0, 0, 0, 0, SDEB_I_SYNC_CACHE, 0, 0,
268         0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
269 /* 0x40; 0x40->0x5f: 10 byte cdbs */
270         0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
271         0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
272         0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
273             SDEB_I_RELEASE,
274         0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
275 /* 0x60; 0x60->0x7d are reserved */
276         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
277         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
278         0, SDEB_I_VARIABLE_LEN,
279 /* 0x80; 0x80->0x9f: 16 byte cdbs */
280         0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
281         SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0, 0, 0, 0, 0,
282         0, 0, 0, SDEB_I_WRITE_SAME, 0, 0, 0, 0,
283         0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN, SDEB_I_SERV_ACT_OUT,
284 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
285         SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
286              SDEB_I_MAINT_OUT, 0, 0, 0,
287         SDEB_I_READ, SDEB_I_SERV_ACT_OUT, SDEB_I_WRITE, SDEB_I_SERV_ACT_IN,
288              0, 0, 0, 0,
289         0, 0, 0, 0, 0, 0, 0, 0,
290         0, 0, 0, 0, 0, 0, 0, 0,
291 /* 0xc0; 0xc0->0xff: vendor specific */
292         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
293         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
294         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
295         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
296 };
297
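/*
 * Lookup example for the table above: an incoming WRITE(10) has cdb[0] ==
 * 0x2a, so opcode_ind_arr[0x2a] yields SDEB_I_WRITE, which in turn indexes
 * opcode_info_arr[] below to find the flags, cdb mask and response handler
 * (resp_write_dt0) for that command.
 */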
298 #define F_D_IN                  1
299 #define F_D_OUT                 2
300 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
301 #define F_D_UNKN                8
302 #define F_RL_WLUN_OK            0x10
303 #define F_SKIP_UA               0x20
304 #define F_DELAY_OVERR           0x40
305 #define F_SA_LOW                0x80    /* cdb byte 1, bits 4 to 0 */
306 #define F_SA_HIGH               0x100   /* as used by variable length cdbs */
307 #define F_INV_OP                0x200
308 #define F_FAKE_RW               0x400
309 #define F_M_ACCESS              0x800   /* media access */
310
311 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
312 #define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
313 #define FF_SA (F_SA_HIGH | F_SA_LOW)
314
315 struct sdebug_dev_info;
316 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
317 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
318 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
319 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
320 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
321 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
322 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
323 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
324 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
325 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
326 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
327 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
328 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
329 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
330 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
331 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
332 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
333 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
334 static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
335 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
336 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
337
338 struct opcode_info_t {
339         u8 num_attached;        /* 0 if this entry is a leaf; 0xff marks
340                                  * the terminating element */
341         u8 opcode;              /* if num_attached > 0, preferred */
342         u16 sa;                 /* service action */
343         u32 flags;              /* OR-ed set of SDEB_F_* */
344         int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
345         const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
346         u8 len_mask[16];        /* len=len_mask[0], then mask for cdb[1]... */
347                                 /* ignore cdb bytes after position 15 */
348 };
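/*
 * How len_mask[] is meant to be read (a hedged sketch, not the driver's own
 * checker): len_mask[0] holds the expected cdb length and each following
 * byte masks the bits that may legitimately be set in cdb[1], cdb[2], ...
 * A strict validation pass could look roughly like this:
 */
#if 0   /* illustrative only */
static bool sdeb_cdb_strict_ok(const struct opcode_info_t *oip,
                               const unsigned char *cdb, int cdb_len)
{
        int k;

        if (cdb_len != oip->len_mask[0])
                return false;           /* wrong cdb length for this opcode */
        for (k = 1; k < cdb_len && k < 16; ++k)
                if (cdb[k] & ~oip->len_mask[k])
                        return false;   /* a reserved bit is set */
        return true;
}
#endif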
349
350 static const struct opcode_info_t msense_iarr[1] = {
351         {0, 0x1a, 0, F_D_IN, NULL, NULL,
352             {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
353 };
354
355 static const struct opcode_info_t mselect_iarr[1] = {
356         {0, 0x15, 0, F_D_OUT, NULL, NULL,
357             {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
358 };
359
360 static const struct opcode_info_t read_iarr[3] = {
361         {0, 0x28, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(10) */
362             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
363              0, 0, 0, 0} },
364         {0, 0x8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL, /* READ(6) */
365             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
366         {0, 0xa8, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, NULL,/* READ(12) */
367             {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
368              0xc7, 0, 0, 0, 0} },
369 };
370
371 static const struct opcode_info_t write_iarr[3] = {
372         {0, 0x2a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 10 */
373             {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
374              0, 0, 0, 0} },
375         {0, 0xa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,    /* 6 */
376             {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
377         {0, 0xaa, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, NULL,   /* 12 */
378             {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9f,
379              0xc7, 0, 0, 0, 0} },
380 };
381
382 static const struct opcode_info_t sa_in_iarr[1] = {
383         {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
384             {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
385              0xff, 0xff, 0xff, 0, 0xc7} },
386 };
387
388 static const struct opcode_info_t vl_iarr[1] = {        /* VARIABLE LENGTH */
389         {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_DIRECT_IO, resp_write_dt0,
390             NULL, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0xb, 0xfa,
391                    0, 0xff, 0xff, 0xff, 0xff} },        /* WRITE(32) */
392 };
393
394 static const struct opcode_info_t maint_in_iarr[2] = {
395         {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
396             {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
397              0xc7, 0, 0, 0, 0} },
398         {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
399             {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
400              0, 0} },
401 };
402
403 static const struct opcode_info_t write_same_iarr[1] = {
404         {0, 0x93, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_16, NULL,
405             {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
406              0xff, 0xff, 0xff, 0x1f, 0xc7} },
407 };
408
409 static const struct opcode_info_t reserve_iarr[1] = {
410         {0, 0x16, 0, F_D_OUT, NULL, NULL,       /* RESERVE(6) */
411             {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
412 };
413
414 static const struct opcode_info_t release_iarr[1] = {
415         {0, 0x17, 0, F_D_OUT, NULL, NULL,       /* RELEASE(6) */
416             {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
417 };
418
419
420 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
421  * plus the terminating elements for logic that scans this table such as
422  * REPORT SUPPORTED OPERATION CODES. */
423 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
424 /* 0 */
425         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,
426             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
427         {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL,
428             {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
429         {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
430             {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
431              0, 0} },
432         {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
433             {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
434         {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
435             {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
436         {1, 0x5a, 0, F_D_IN, resp_mode_sense, msense_iarr,
437             {10,  0xf8, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
438              0} },
439         {1, 0x55, 0, F_D_OUT, resp_mode_select, mselect_iarr,
440             {10,  0xf1, 0, 0, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
441         {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,
442             {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
443              0, 0, 0} },
444         {0, 0x25, 0, F_D_IN, resp_readcap, NULL,
445             {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
446              0, 0} },
447         {3, 0x88, 0, F_D_IN | FF_DIRECT_IO, resp_read_dt0, read_iarr,
448             {16,  0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
449              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* READ(16) */
450 /* 10 */
451         {3, 0x8a, 0, F_D_OUT | FF_DIRECT_IO, resp_write_dt0, write_iarr,
452             {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
453              0xff, 0xff, 0xff, 0x9f, 0xc7} },           /* WRITE(16) */
454         {0, 0x1b, 0, 0, resp_start_stop, NULL,          /* START STOP UNIT */
455             {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
456         {1, 0x9e, 0x10, F_SA_LOW | F_D_IN, resp_readcap16, sa_in_iarr,
457             {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
458              0xff, 0xff, 0xff, 0x1, 0xc7} },    /* READ CAPACITY(16) */
459         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* SA OUT */
460             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
461         {2, 0xa3, 0xa, F_SA_LOW | F_D_IN, resp_report_tgtpgs, maint_in_iarr,
462             {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0, 0,
463              0} },
464         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
465             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
466         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */
467             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
468         {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0,
469             vl_iarr, {32,  0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0,
470                       0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */
471         {1, 0x56, 0, F_D_OUT, NULL, reserve_iarr, /* RESERVE(10) */
472             {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
473              0} },
474         {1, 0x57, 0, F_D_OUT, NULL, release_iarr, /* RELEASE(10) */
475             {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
476              0} },
477 /* 20 */
478         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */
479             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
480         {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
481             {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
482         {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
483             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
484         {0, 0x1d, 0, F_D_OUT, NULL, NULL,       /* SEND DIAGNOSTIC */
485             {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
486         {0, 0x42, 0, F_D_OUT | FF_DIRECT_IO, resp_unmap, NULL, /* UNMAP */
487             {10,  0x1, 0, 0, 0, 0, 0x1f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
488         {0, 0x53, 0, F_D_IN | F_D_OUT | FF_DIRECT_IO, resp_xdwriteread_10,
489             NULL, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7,
490                    0, 0, 0, 0, 0, 0} },
491         {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
492             {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
493              0, 0, 0, 0} },                     /* WRITE_BUFFER */
494         {1, 0x41, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, resp_write_same_10,
495             write_same_iarr, {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff,
496                               0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
497         {0, 0x35, 0, F_DELAY_OVERR | FF_DIRECT_IO, NULL, NULL, /* SYNC_CACHE */
498             {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x1f, 0xff, 0xff, 0xc7, 0, 0,
499              0, 0, 0, 0} },
500         {0, 0x89, 0, F_D_OUT | FF_DIRECT_IO, resp_comp_write, NULL,
501             {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
502              0, 0xff, 0x1f, 0xc7} },            /* COMPARE AND WRITE */
503
504 /* 30 */
505         {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
506             {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
507 };
508
509 struct sdebug_scmd_extra_t {
510         bool inj_recovered;
511         bool inj_transport;
512         bool inj_dif;
513         bool inj_dix;
514         bool inj_short;
515 };
516
517 static int scsi_debug_add_host = DEF_NUM_HOST;
518 static int scsi_debug_ato = DEF_ATO;
519 static int scsi_debug_delay = DEF_DELAY;
520 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
521 static int scsi_debug_dif = DEF_DIF;
522 static int scsi_debug_dix = DEF_DIX;
523 static int scsi_debug_dsense = DEF_D_SENSE;
524 static int scsi_debug_every_nth = DEF_EVERY_NTH;
525 static int scsi_debug_fake_rw = DEF_FAKE_RW;
526 static unsigned int scsi_debug_guard = DEF_GUARD;
527 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
528 static int scsi_debug_max_luns = DEF_MAX_LUNS;
529 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
530 static atomic_t retired_max_queue;      /* if > 0 then was prior max_queue */
531 static int scsi_debug_ndelay = DEF_NDELAY;
532 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
533 static int scsi_debug_no_uld = 0;
534 static int scsi_debug_num_parts = DEF_NUM_PARTS;
535 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
536 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
537 static int scsi_debug_opts = DEF_OPTS;
538 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
539 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
540 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
541 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
542 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
543 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
544 static unsigned int scsi_debug_lbpu = DEF_LBPU;
545 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
546 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
547 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
548 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
549 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
550 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
551 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
552 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
553 static bool scsi_debug_removable = DEF_REMOVABLE;
554 static bool scsi_debug_clustering;
555 static bool scsi_debug_host_lock = DEF_HOST_LOCK;
556 static bool scsi_debug_strict = DEF_STRICT;
557 static bool sdebug_any_injecting_opt;
558
559 static atomic_t sdebug_cmnd_count;
560 static atomic_t sdebug_completions;
561 static atomic_t sdebug_a_tsf;           /* counter of 'almost' TSFs */
562
563 #define DEV_READONLY(TGT)      (0)
564
565 static unsigned int sdebug_store_sectors;
566 static sector_t sdebug_capacity;        /* in sectors */
567
568 /* old BIOS-style geometry; the kernel may drop these, but some mode
569    sense pages may still need them */
570 static int sdebug_heads;                /* heads per disk */
571 static int sdebug_cylinders_per;        /* cylinders per surface */
572 static int sdebug_sectors_per;          /* sectors per cylinder */
573
574 #define SDEBUG_MAX_PARTS 4
575
576 #define SCSI_DEBUG_MAX_CMD_LEN 32
577
578 static unsigned int scsi_debug_lbp(void)
579 {
580         return ((0 == scsi_debug_fake_rw) &&
581                 (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
582 }
583
584 struct sdebug_dev_info {
585         struct list_head dev_list;
586         unsigned int channel;
587         unsigned int target;
588         u64 lun;
589         struct sdebug_host_info *sdbg_host;
590         unsigned long uas_bm[1];
591         atomic_t num_in_q;
592         char stopped;           /* TODO: should be atomic */
593         bool used;
594 };
595
596 struct sdebug_host_info {
597         struct list_head host_list;
598         struct Scsi_Host *shost;
599         struct device dev;
600         struct list_head dev_info_list;
601 };
602
603 #define to_sdebug_host(d)       \
604         container_of(d, struct sdebug_host_info, dev)
605
606 static LIST_HEAD(sdebug_host_list);
607 static DEFINE_SPINLOCK(sdebug_host_list_lock);
608
609
610 struct sdebug_hrtimer {         /* ... is derived from hrtimer */
611         struct hrtimer hrt;     /* must be first element */
612         int qa_indx;
613 };
614
615 struct sdebug_queued_cmd {
616         /* in_use flagged by a bit in queued_in_use_bm[] */
617         struct timer_list *cmnd_timerp;
618         struct tasklet_struct *tletp;
619         struct sdebug_hrtimer *sd_hrtp;
620         struct scsi_cmnd * a_cmnd;
621 };
622 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
623 static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
624
625
626 static unsigned char * fake_storep;     /* ramdisk storage */
627 static struct sd_dif_tuple *dif_storep; /* protection info */
628 static void *map_storep;                /* provisioning map */
629
630 static unsigned long map_size;
631 static int num_aborts;
632 static int num_dev_resets;
633 static int num_target_resets;
634 static int num_bus_resets;
635 static int num_host_resets;
636 static int dix_writes;
637 static int dix_reads;
638 static int dif_errors;
639
640 static DEFINE_SPINLOCK(queued_arr_lock);
641 static DEFINE_RWLOCK(atomic_rw);
642
643 static char sdebug_proc_name[] = MY_NAME;
644 static const char *my_name = MY_NAME;
645
646 static struct bus_type pseudo_lld_bus;
647
648 static struct device_driver sdebug_driverfs_driver = {
649         .name           = sdebug_proc_name,
650         .bus            = &pseudo_lld_bus,
651 };
652
653 static const int check_condition_result =
654                 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
655
656 static const int illegal_condition_result =
657         (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
658
659 static const int device_qfull_result =
660         (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
661
662 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
663                                      0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
664                                      0, 0, 0, 0};
665 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
666                                     0, 0, 0x2, 0x4b};
667 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
668                                    0, 0, 0x0, 0x0};
669
670 static void *fake_store(unsigned long long lba)
671 {
672         lba = do_div(lba, sdebug_store_sectors);
673
674         return fake_storep + lba * scsi_debug_sector_size;
675 }
676
677 static struct sd_dif_tuple *dif_store(sector_t sector)
678 {
679         sector = do_div(sector, sdebug_store_sectors);
680
681         return dif_storep + sector;
682 }
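/*
 * Note on the two helpers above: do_div() divides its first argument in
 * place and returns the remainder, so both functions map any LBA onto the
 * backing ram store modulo sdebug_store_sectors. For example (illustrative
 * numbers), with sdebug_store_sectors == 2048 an access to LBA 5000 lands
 * on store sector 5000 % 2048 == 904.
 */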
683
684 static int sdebug_add_adapter(void);
685 static void sdebug_remove_adapter(void);
686
687 static void sdebug_max_tgts_luns(void)
688 {
689         struct sdebug_host_info *sdbg_host;
690         struct Scsi_Host *hpnt;
691
692         spin_lock(&sdebug_host_list_lock);
693         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
694                 hpnt = sdbg_host->shost;
695                 if ((hpnt->this_id >= 0) &&
696                     (scsi_debug_num_tgts > hpnt->this_id))
697                         hpnt->max_id = scsi_debug_num_tgts + 1;
698                 else
699                         hpnt->max_id = scsi_debug_num_tgts;
700                 /* scsi_debug_max_luns; */
701                 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
702         }
703         spin_unlock(&sdebug_host_list_lock);
704 }
705
706 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
707
708 /* Pass in_bit as -1 when no bit position of the invalid field is given */
709 static void
710 mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
711                      int in_byte, int in_bit)
712 {
713         unsigned char *sbuff;
714         u8 sks[4];
715         int sl, asc;
716
717         sbuff = scp->sense_buffer;
718         if (!sbuff) {
719                 sdev_printk(KERN_ERR, scp->device,
720                             "%s: sense_buffer is NULL\n", __func__);
721                 return;
722         }
723         asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
724         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
725         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
726                                 asc, 0);
727         memset(sks, 0, sizeof(sks));
728         sks[0] = 0x80;
729         if (c_d)
730                 sks[0] |= 0x40;
731         if (in_bit >= 0) {
732                 sks[0] |= 0x8;
733                 sks[0] |= 0x7 & in_bit;
734         }
735         put_unaligned_be16(in_byte, sks + 1);
736         if (scsi_debug_dsense) {
737                 sl = sbuff[7] + 8;
738                 sbuff[7] = sl;
739                 sbuff[sl] = 0x2;
740                 sbuff[sl + 1] = 0x6;
741                 memcpy(sbuff + sl + 4, sks, 3);
742         } else
743                 memcpy(sbuff + 15, sks, 3);
744         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
745                 sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
746                             "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
747                             my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
748 }
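/*
 * Worked example for the helper above: mk_sense_invalid_fld(scp,
 * SDEB_IN_CDB, 4, 2) flags bit 2 of cdb byte 4 as invalid. The resulting
 * sense-key-specific bytes are 0xca 0x00 0x04: SKSV and C/D set, BPV set
 * with bit pointer 2, followed by the big-endian field (byte) pointer 4.
 */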
749
750 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
751 {
752         unsigned char *sbuff;
753
754         sbuff = scp->sense_buffer;
755         if (!sbuff) {
756                 sdev_printk(KERN_ERR, scp->device,
757                             "%s: sense_buffer is NULL\n", __func__);
758                 return;
759         }
760         memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
761
762         scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
763
764         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
765                 sdev_printk(KERN_INFO, scp->device,
766                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
767                             my_name, key, asc, asq);
768 }
769
770 static void
771 mk_sense_invalid_opcode(struct scsi_cmnd *scp)
772 {
773         mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
774 }
775
776 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
777 {
778         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
779                 if (0x1261 == cmd)
780                         sdev_printk(KERN_INFO, dev,
781                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
782                 else if (0x5331 == cmd)
783                         sdev_printk(KERN_INFO, dev,
784                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
785                                     __func__);
786                 else
787                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
788                                     __func__, cmd);
789         }
790         return -EINVAL;
791         /* return -ENOTTY; // correct return but upsets fdisk */
792 }
793
794 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
795 {
796         struct sdebug_host_info *sdhp;
797         struct sdebug_dev_info *dp;
798
799         spin_lock(&sdebug_host_list_lock);
800         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
801                 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
802                         if ((devip->sdbg_host == dp->sdbg_host) &&
803                             (devip->target == dp->target))
804                                 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
805                 }
806         }
807         spin_unlock(&sdebug_host_list_lock);
808 }
809
810 static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
811                            struct sdebug_dev_info * devip)
812 {
813         int k;
814         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
815
816         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
817         if (k != SDEBUG_NUM_UAS) {
818                 const char *cp = NULL;
819
820                 switch (k) {
821                 case SDEBUG_UA_POR:
822                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
823                                         UA_RESET_ASC, POWER_ON_RESET_ASCQ);
824                         if (debug)
825                                 cp = "power on reset";
826                         break;
827                 case SDEBUG_UA_BUS_RESET:
828                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
829                                         UA_RESET_ASC, BUS_RESET_ASCQ);
830                         if (debug)
831                                 cp = "bus reset";
832                         break;
833                 case SDEBUG_UA_MODE_CHANGED:
834                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
835                                         UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
836                         if (debug)
837                                 cp = "mode parameters changed";
838                         break;
839                 case SDEBUG_UA_CAPACITY_CHANGED:
840                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
841                                         UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
842                         if (debug)
843                                 cp = "capacity data changed";
844                         break;
845                 case SDEBUG_UA_MICROCODE_CHANGED:
846                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
847                                  TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
848                         if (debug)
849                                 cp = "microcode has been changed";
850                         break;
851                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
852                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
853                                         TARGET_CHANGED_ASC,
854                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
855                         if (debug)
856                                 cp = "microcode has been changed without reset";
857                         break;
858                 case SDEBUG_UA_LUNS_CHANGED:
859                         /*
860                          * SPC-3 behavior is to report a UNIT ATTENTION with
861                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
862                          * on the target, until a REPORT LUNS command is
863                          * received.  SPC-4 behavior is to report it only once.
864                          * NOTE:  scsi_debug_scsi_level does not use the same
865                          * values as struct scsi_device->scsi_level.
866                          */
867                         if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
868                                 clear_luns_changed_on_target(devip);
869                         mk_sense_buffer(SCpnt, UNIT_ATTENTION,
870                                         TARGET_CHANGED_ASC,
871                                         LUNS_CHANGED_ASCQ);
872                         if (debug)
873                                 cp = "reported luns data has changed";
874                         break;
875                 default:
876                         pr_warn("%s: unexpected unit attention code=%d\n",
877                                 __func__, k);
878                         if (debug)
879                                 cp = "unknown";
880                         break;
881                 }
882                 clear_bit(k, devip->uas_bm);
883                 if (debug)
884                         sdev_printk(KERN_INFO, SCpnt->device,
885                                    "%s reports: Unit attention: %s\n",
886                                    my_name, cp);
887                 return check_condition_result;
888         }
889         if ((UAS_TUR == uas_only) && devip->stopped) {
890                 mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
891                                 0x2);
892                 if (debug)
893                         sdev_printk(KERN_INFO, SCpnt->device,
894                                     "%s reports: Not ready: %s\n", my_name,
895                                     "initializing command required");
896                 return check_condition_result;
897         }
898         return 0;
899 }
900
901 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
902 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
903                                 int arr_len)
904 {
905         int act_len;
906         struct scsi_data_buffer *sdb = scsi_in(scp);
907
908         if (!sdb->length)
909                 return 0;
910         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
911                 return (DID_ERROR << 16);
912
913         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
914                                       arr, arr_len);
915         sdb->resid = scsi_bufflen(scp) - act_len;
916
917         return 0;
918 }
919
920 /* Returns number of bytes fetched into 'arr' or -1 if error. */
921 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
922                                int arr_len)
923 {
924         if (!scsi_bufflen(scp))
925                 return 0;
926         if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
927                 return -1;
928
929         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
930 }
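/*
 * Typical use of the two helpers above (a sketch; the resp_* routines later
 * in this file follow this pattern): a data-in response builds its payload
 * in a local array and then does something like
 *   return fill_from_dev_buffer(scp, arr, min(alloc_len, n));
 * while data-out commands (MODE SELECT, UNMAP, ...) pull their parameter
 * list with fetch_to_dev_buffer() before acting on it.
 */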
931
932
933 static const char * inq_vendor_id = "Linux   ";
934 static const char * inq_product_id = "scsi_debug      ";
935 static const char *inq_product_rev = "0184";    /* version less '.' */
936
937 /* Device identification VPD page. Returns number of bytes placed in arr */
938 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
939                            int target_dev_id, int dev_id_num,
940                            const char * dev_id_str,
941                            int dev_id_str_len)
942 {
943         int num, port_a;
944         char b[32];
945
946         port_a = target_dev_id + 1;
947         /* T10 vendor identifier field format (faked) */
948         arr[0] = 0x2;   /* ASCII */
949         arr[1] = 0x1;
950         arr[2] = 0x0;
951         memcpy(&arr[4], inq_vendor_id, 8);
952         memcpy(&arr[12], inq_product_id, 16);
953         memcpy(&arr[28], dev_id_str, dev_id_str_len);
954         num = 8 + 16 + dev_id_str_len;
955         arr[3] = num;
956         num += 4;
957         if (dev_id_num >= 0) {
958                 /* NAA-5, Logical unit identifier (binary) */
959                 arr[num++] = 0x1;       /* binary (not necessarily sas) */
960                 arr[num++] = 0x3;       /* PIV=0, lu, naa */
961                 arr[num++] = 0x0;
962                 arr[num++] = 0x8;
963                 arr[num++] = 0x53;  /* naa-5 ieee company id=0x333333 (fake) */
964                 arr[num++] = 0x33;
965                 arr[num++] = 0x33;
966                 arr[num++] = 0x30;
967                 arr[num++] = (dev_id_num >> 24);
968                 arr[num++] = (dev_id_num >> 16) & 0xff;
969                 arr[num++] = (dev_id_num >> 8) & 0xff;
970                 arr[num++] = dev_id_num & 0xff;
971                 /* Target relative port number */
972                 arr[num++] = 0x61;      /* proto=sas, binary */
973                 arr[num++] = 0x94;      /* PIV=1, target port, rel port */
974                 arr[num++] = 0x0;       /* reserved */
975                 arr[num++] = 0x4;       /* length */
976                 arr[num++] = 0x0;       /* reserved */
977                 arr[num++] = 0x0;       /* reserved */
978                 arr[num++] = 0x0;
979                 arr[num++] = 0x1;       /* relative port A */
980         }
981         /* NAA-5, Target port identifier */
982         arr[num++] = 0x61;      /* proto=sas, binary */
983         arr[num++] = 0x93;      /* piv=1, target port, naa */
984         arr[num++] = 0x0;
985         arr[num++] = 0x8;
986         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
987         arr[num++] = 0x22;
988         arr[num++] = 0x22;
989         arr[num++] = 0x20;
990         arr[num++] = (port_a >> 24);
991         arr[num++] = (port_a >> 16) & 0xff;
992         arr[num++] = (port_a >> 8) & 0xff;
993         arr[num++] = port_a & 0xff;
994         /* NAA-5, Target port group identifier */
995         arr[num++] = 0x61;      /* proto=sas, binary */
996         arr[num++] = 0x95;      /* piv=1, target port group id */
997         arr[num++] = 0x0;
998         arr[num++] = 0x4;
999         arr[num++] = 0;
1000         arr[num++] = 0;
1001         arr[num++] = (port_group_id >> 8) & 0xff;
1002         arr[num++] = port_group_id & 0xff;
1003         /* NAA-5, Target device identifier */
1004         arr[num++] = 0x61;      /* proto=sas, binary */
1005         arr[num++] = 0xa3;      /* piv=1, target device, naa */
1006         arr[num++] = 0x0;
1007         arr[num++] = 0x8;
1008         arr[num++] = 0x52;      /* naa-5, company id=0x222222 (fake) */
1009         arr[num++] = 0x22;
1010         arr[num++] = 0x22;
1011         arr[num++] = 0x20;
1012         arr[num++] = (target_dev_id >> 24);
1013         arr[num++] = (target_dev_id >> 16) & 0xff;
1014         arr[num++] = (target_dev_id >> 8) & 0xff;
1015         arr[num++] = target_dev_id & 0xff;
1016         /* SCSI name string: Target device identifier */
1017         arr[num++] = 0x63;      /* proto=sas, UTF-8 */
1018         arr[num++] = 0xa8;      /* piv=1, target device, SCSI name string */
1019         arr[num++] = 0x0;
1020         arr[num++] = 24;
1021         memcpy(arr + num, "naa.52222220", 12);
1022         num += 12;
1023         snprintf(b, sizeof(b), "%08X", target_dev_id);
1024         memcpy(arr + num, b, 8);
1025         num += 8;
1026         memset(arr + num, 0, 4);
1027         num += 4;
1028         return num;
1029 }
1030
1031
1032 static unsigned char vpd84_data[] = {
1033 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1034     0x22,0x22,0x22,0x0,0xbb,0x1,
1035     0x22,0x22,0x22,0x0,0xbb,0x2,
1036 };
1037
1038 /*  Software interface identification VPD page */
1039 static int inquiry_evpd_84(unsigned char * arr)
1040 {
1041         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1042         return sizeof(vpd84_data);
1043 }
1044
1045 /* Management network addresses VPD page */
1046 static int inquiry_evpd_85(unsigned char * arr)
1047 {
1048         int num = 0;
1049         const char * na1 = "https://www.kernel.org/config";
1050         const char * na2 = "http://www.kernel.org/log";
1051         int plen, olen;
1052
1053         arr[num++] = 0x1;       /* lu, storage config */
1054         arr[num++] = 0x0;       /* reserved */
1055         arr[num++] = 0x0;
1056         olen = strlen(na1);
1057         plen = olen + 1;
1058         if (plen % 4)
1059                 plen = ((plen / 4) + 1) * 4;
1060         arr[num++] = plen;      /* length, null terminated, padded */
1061         memcpy(arr + num, na1, olen);
1062         memset(arr + num + olen, 0, plen - olen);
1063         num += plen;
1064
1065         arr[num++] = 0x4;       /* lu, logging */
1066         arr[num++] = 0x0;       /* reserved */
1067         arr[num++] = 0x0;
1068         olen = strlen(na2);
1069         plen = olen + 1;
1070         if (plen % 4)
1071                 plen = ((plen / 4) + 1) * 4;
1072         arr[num++] = plen;      /* length, null terminated, padded */
1073         memcpy(arr + num, na2, olen);
1074         memset(arr + num + olen, 0, plen - olen);
1075         num += plen;
1076
1077         return num;
1078 }
1079
1080 /* SCSI ports VPD page */
1081 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
1082 {
1083         int num = 0;
1084         int port_a, port_b;
1085
1086         port_a = target_dev_id + 1;
1087         port_b = port_a + 1;
1088         arr[num++] = 0x0;       /* reserved */
1089         arr[num++] = 0x0;       /* reserved */
1090         arr[num++] = 0x0;
1091         arr[num++] = 0x1;       /* relative port 1 (primary) */
1092         memset(arr + num, 0, 6);
1093         num += 6;
1094         arr[num++] = 0x0;
1095         arr[num++] = 12;        /* length tp descriptor */
1096         /* naa-5 target port identifier (A) */
1097         arr[num++] = 0x61;      /* proto=sas, binary */
1098         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1099         arr[num++] = 0x0;       /* reserved */
1100         arr[num++] = 0x8;       /* length */
1101         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1102         arr[num++] = 0x22;
1103         arr[num++] = 0x22;
1104         arr[num++] = 0x20;
1105         arr[num++] = (port_a >> 24);
1106         arr[num++] = (port_a >> 16) & 0xff;
1107         arr[num++] = (port_a >> 8) & 0xff;
1108         arr[num++] = port_a & 0xff;
1109
1110         arr[num++] = 0x0;       /* reserved */
1111         arr[num++] = 0x0;       /* reserved */
1112         arr[num++] = 0x0;
1113         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1114         memset(arr + num, 0, 6);
1115         num += 6;
1116         arr[num++] = 0x0;
1117         arr[num++] = 12;        /* length tp descriptor */
1118         /* naa-5 target port identifier (B) */
1119         arr[num++] = 0x61;      /* proto=sas, binary */
1120         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1121         arr[num++] = 0x0;       /* reserved */
1122         arr[num++] = 0x8;       /* length */
1123         arr[num++] = 0x52;      /* NAA-5, company_id=0x222222 (fake) */
1124         arr[num++] = 0x22;
1125         arr[num++] = 0x22;
1126         arr[num++] = 0x20;
1127         arr[num++] = (port_b >> 24);
1128         arr[num++] = (port_b >> 16) & 0xff;
1129         arr[num++] = (port_b >> 8) & 0xff;
1130         arr[num++] = port_b & 0xff;
1131
1132         return num;
1133 }
1134
1135
1136 static unsigned char vpd89_data[] = {
1137 /* from 4th byte */ 0,0,0,0,
1138 'l','i','n','u','x',' ',' ',' ',
1139 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1140 '1','2','3','4',
1141 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1142 0xec,0,0,0,
1143 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1144 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1145 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1146 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1147 0x53,0x41,
1148 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1149 0x20,0x20,
1150 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1151 0x10,0x80,
1152 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1153 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1154 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1155 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1156 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1157 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1158 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1159 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1160 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1162 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1163 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1164 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1165 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1166 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1167 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1168 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1169 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1170 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1171 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1172 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1173 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1174 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1175 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1176 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1177 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1178 };
1179
1180 /* ATA Information VPD page */
1181 static int inquiry_evpd_89(unsigned char * arr)
1182 {
1183         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1184         return sizeof(vpd89_data);
1185 }
1186
1187
1188 static unsigned char vpdb0_data[] = {
1189         /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1190         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1191         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1192         0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1193 };
1194
1195 /* Block limits VPD page (SBC-3) */
1196 static int inquiry_evpd_b0(unsigned char * arr)
1197 {
1198         unsigned int gran;
1199
1200         memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1201
1202         /* Optimal transfer length granularity */
1203         gran = 1 << scsi_debug_physblk_exp;
1204         arr[2] = (gran >> 8) & 0xff;
1205         arr[3] = gran & 0xff;
1206
1207         /* Maximum Transfer Length */
1208         if (sdebug_store_sectors > 0x400) {
1209                 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
1210                 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
1211                 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
1212                 arr[7] = sdebug_store_sectors & 0xff;
1213         }
1214
1215         /* Optimal Transfer Length */
1216         put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
1217
1218         if (scsi_debug_lbpu) {
1219                 /* Maximum Unmap LBA Count */
1220                 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
1221
1222                 /* Maximum Unmap Block Descriptor Count */
1223                 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
1224         }
1225
1226         /* Unmap Granularity Alignment */
1227         if (scsi_debug_unmap_alignment) {
1228                 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
1229                 arr[28] |= 0x80; /* UGAVALID */
1230         }
1231
1232         /* Optimal Unmap Granularity */
1233         put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
1234
1235         /* Maximum WRITE SAME Length */
1236         put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
1237
1238         return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1239
1240         return sizeof(vpdb0_data);
1241 }
1242
1243 /* Block device characteristics VPD page (SBC-3) */
1244 static int inquiry_evpd_b1(unsigned char *arr)
1245 {
1246         memset(arr, 0, 0x3c);
1247         arr[0] = 0;
1248         arr[1] = 1;     /* non rotating medium (e.g. solid state) */
1249         arr[2] = 0;
1250         arr[3] = 5;     /* less than 1.8" */
1251
1252         return 0x3c;
1253 }
1254
1255 /* Logical block provisioning VPD page (SBC-3) */
1256 static int inquiry_evpd_b2(unsigned char *arr)
1257 {
1258         memset(arr, 0, 0x4);
1259         arr[0] = 0;                     /* threshold exponent */
1260
1261         if (scsi_debug_lbpu)
1262                 arr[1] = 1 << 7;
1263
1264         if (scsi_debug_lbpws)
1265                 arr[1] |= 1 << 6;
1266
1267         if (scsi_debug_lbpws10)
1268                 arr[1] |= 1 << 5;
1269
1270         if (scsi_debug_lbprz)
1271                 arr[1] |= 1 << 2;
1272
1273         return 0x4;
1274 }
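/*
 * Worked example for the page above (illustrative parameters): loading the
 * module with lbpu=1 lbpws=1 lbprz=1 makes the provisioning byte (arr[1]
 * here) 0x80 | 0x40 | 0x04 == 0xc4, advertising UNMAP, WRITE SAME(16) with
 * the UNMAP bit, and zeroed data after unmapping.
 */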
1275
1276 #define SDEBUG_LONG_INQ_SZ 96
1277 #define SDEBUG_MAX_INQ_ARR_SZ 584
1278
1279 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1280 {
1281         unsigned char pq_pdt;
1282         unsigned char * arr;
1283         unsigned char *cmd = scp->cmnd;
1284         int alloc_len, n, ret;
1285         bool have_wlun;
1286
1287         alloc_len = (cmd[3] << 8) + cmd[4];
1288         arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1289         if (! arr)
1290                 return DID_REQUEUE << 16;
1291         have_wlun = (scp->device->lun == SAM2_WLUN_REPORT_LUNS);
1292         if (have_wlun)
1293                 pq_pdt = 0x1e;  /* present, wlun */
1294         else if (scsi_debug_no_lun_0 && (0 == devip->lun))
1295                 pq_pdt = 0x7f;  /* not present, no device type */
1296         else
1297                 pq_pdt = (scsi_debug_ptype & 0x1f);
1298         arr[0] = pq_pdt;
1299         if (0x2 & cmd[1]) {  /* CMDDT bit set */
1300                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1301                 kfree(arr);
1302                 return check_condition_result;
1303         } else if (0x1 & cmd[1]) {  /* EVPD bit set */
1304                 int lu_id_num, port_group_id, target_dev_id, len;
1305                 char lu_id_str[6];
1306                 int host_no = devip->sdbg_host->shost->host_no;
1307                 
1308                 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1309                     (devip->channel & 0x7f);
1310                 if (0 == scsi_debug_vpd_use_hostno)
1311                         host_no = 0;
1312                 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1313                             (devip->target * 1000) + devip->lun);
1314                 target_dev_id = ((host_no + 1) * 2000) +
1315                                  (devip->target * 1000) - 3;
1316                 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1317                 if (0 == cmd[2]) { /* supported vital product data pages */
1318                         arr[1] = cmd[2];        /*sanity */
1319                         n = 4;
1320                         arr[n++] = 0x0;   /* this page */
1321                         arr[n++] = 0x80;  /* unit serial number */
1322                         arr[n++] = 0x83;  /* device identification */
1323                         arr[n++] = 0x84;  /* software interface ident. */
1324                         arr[n++] = 0x85;  /* management network addresses */
1325                         arr[n++] = 0x86;  /* extended inquiry */
1326                         arr[n++] = 0x87;  /* mode page policy */
1327                         arr[n++] = 0x88;  /* SCSI ports */
1328                         arr[n++] = 0x89;  /* ATA information */
1329                         arr[n++] = 0xb0;  /* Block limits (SBC) */
1330                         arr[n++] = 0xb1;  /* Block characteristics (SBC) */
1331                         if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
1332                                 arr[n++] = 0xb2;
1333                         arr[3] = n - 4;   /* number of supported VPD pages */
1334                 } else if (0x80 == cmd[2]) { /* unit serial number */
1335                         arr[1] = cmd[2];        /*sanity */
1336                         arr[3] = len;
1337                         memcpy(&arr[4], lu_id_str, len);
1338                 } else if (0x83 == cmd[2]) { /* device identification */
1339                         arr[1] = cmd[2];        /*sanity */
1340                         arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
1341                                                  target_dev_id, lu_id_num,
1342                                                  lu_id_str, len);
1343                 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1344                         arr[1] = cmd[2];        /*sanity */
1345                         arr[3] = inquiry_evpd_84(&arr[4]);
1346                 } else if (0x85 == cmd[2]) { /* Management network addresses */
1347                         arr[1] = cmd[2];        /*sanity */
1348                         arr[3] = inquiry_evpd_85(&arr[4]);
1349                 } else if (0x86 == cmd[2]) { /* extended inquiry */
1350                         arr[1] = cmd[2];        /*sanity */
1351                         arr[3] = 0x3c;  /* number of following entries */
1352                         if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
1353                                 arr[4] = 0x4;   /* SPT: GRD_CHK:1 */
1354                         else if (scsi_debug_dif)
1355                                 arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
1356                         else
1357                                 arr[4] = 0x0;   /* no protection stuff */
1358                         arr[5] = 0x7;   /* head of q, ordered + simple q's */
1359                 } else if (0x87 == cmd[2]) { /* mode page policy */
1360                         arr[1] = cmd[2];        /*sanity */
1361                         arr[3] = 0x8;   /* number of following entries */
1362                         arr[4] = 0x2;   /* disconnect-reconnect mp */
1363                         arr[6] = 0x80;  /* mlus, shared */
1364                         arr[8] = 0x18;   /* protocol specific lu */
1365                         arr[10] = 0x82;  /* mlus, per initiator port */
1366                 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1367                         arr[1] = cmd[2];        /*sanity */
1368                         arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
1369                 } else if (0x89 == cmd[2]) { /* ATA information */
1370                         arr[1] = cmd[2];        /*sanity */
1371                         n = inquiry_evpd_89(&arr[4]);
1372                         arr[2] = (n >> 8);
1373                         arr[3] = (n & 0xff);
1374                 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
1375                         arr[1] = cmd[2];        /*sanity */
1376                         arr[3] = inquiry_evpd_b0(&arr[4]);
1377                 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
1378                         arr[1] = cmd[2];        /*sanity */
1379                         arr[3] = inquiry_evpd_b1(&arr[4]);
1380                 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
1381                         arr[1] = cmd[2];        /*sanity */
1382                         arr[3] = inquiry_evpd_b2(&arr[4]);
1383                 } else {
1384                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1385                         kfree(arr);
1386                         return check_condition_result;
1387                 }
1388                 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1389                 ret = fill_from_dev_buffer(scp, arr,
1390                             min(len, SDEBUG_MAX_INQ_ARR_SZ));
1391                 kfree(arr);
1392                 return ret;
1393         }
1394         /* drops through here for a standard inquiry */
1395         arr[1] = scsi_debug_removable ? 0x80 : 0;       /* Removable disk */
1396         arr[2] = scsi_debug_scsi_level;
1397         arr[3] = 2;    /* response_data_format==2 */
1398         arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1399         arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
1400         if (0 == scsi_debug_vpd_use_hostno)
1401                 arr[5] = 0x10; /* claim: implicit TGPS */
1402         arr[6] = 0x10; /* claim: MultiP */
1403         /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1404         arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1405         memcpy(&arr[8], inq_vendor_id, 8);
1406         memcpy(&arr[16], inq_product_id, 16);
1407         memcpy(&arr[32], inq_product_rev, 4);
1408         /* version descriptors (2 bytes each) follow */
1409         arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
1410         arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
1411         n = 62;
1412         if (scsi_debug_ptype == 0) {
1413                 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1414         } else if (scsi_debug_ptype == 1) {
1415                 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1416         }
1417         arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
1418         ret = fill_from_dev_buffer(scp, arr,
1419                             min(alloc_len, SDEBUG_LONG_INQ_SZ));
1420         kfree(arr);
1421         return ret;
1422 }
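
/*
 * Illustrative sketch (editor's example, user-space code, not part of this
 * driver): one way to exercise the EVPD branch above through the sg
 * driver's SG_IO ioctl.  inquiry_evpd() is a hypothetical helper; the
 * sg3_utils sg_inq utility provides the same from the command line.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	static int inquiry_evpd(int sg_fd, unsigned char page,
 *				unsigned char *buf, unsigned short buf_len)
 *	{
 *		unsigned char cdb[6] = { 0x12, 0x01, page,	// INQUIRY, EVPD=1
 *					 buf_len >> 8, buf_len & 0xff, 0 };
 *		unsigned char sense[32];
 *		struct sg_io_hdr io;
 *
 *		memset(&io, 0, sizeof(io));
 *		io.interface_id = 'S';
 *		io.dxfer_direction = SG_DXFER_FROM_DEV;
 *		io.cmd_len = sizeof(cdb);
 *		io.cmdp = cdb;
 *		io.dxfer_len = buf_len;
 *		io.dxferp = buf;
 *		io.mx_sb_len = sizeof(sense);
 *		io.sbp = sense;
 *		io.timeout = 20000;	// milliseconds
 *		return ioctl(sg_fd, SG_IO, &io);
 *	}
 *
 * e.g. inquiry_evpd(fd, 0x83, buf, sizeof(buf)) fetches the device
 * identification page that inquiry_evpd_83() constructs for this driver.
 */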
1423
1424 static int resp_requests(struct scsi_cmnd * scp,
1425                          struct sdebug_dev_info * devip)
1426 {
1427         unsigned char * sbuff;
1428         unsigned char *cmd = scp->cmnd;
1429         unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1430         bool dsense, want_dsense;
1431         int len = 18;
1432
1433         memset(arr, 0, sizeof(arr));
1434         dsense = !!(cmd[1] & 1);
1435         want_dsense = dsense || scsi_debug_dsense;
1436         sbuff = scp->sense_buffer;
1437         if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1438                 if (dsense) {
1439                         arr[0] = 0x72;
1440                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
1441                         arr[2] = THRESHOLD_EXCEEDED;
1442                         arr[3] = 0xff;          /* TEST set and MRIE==6 */
1443                         len = 8;
1444                 } else {
1445                         arr[0] = 0x70;
1446                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
1447                         arr[7] = 0xa;           /* 18 byte sense buffer */
1448                         arr[12] = THRESHOLD_EXCEEDED;
1449                         arr[13] = 0xff;         /* TEST set and MRIE==6 */
1450                 }
1451         } else {
1452                 memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
1453                 if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
1454                         ;       /* have sense and formats match */
1455                 else if (arr[0] <= 0x70) {
1456                         if (dsense) {
1457                                 memset(arr, 0, 8);
1458                                 arr[0] = 0x72;
1459                                 len = 8;
1460                         } else {
1461                                 memset(arr, 0, 18);
1462                                 arr[0] = 0x70;
1463                                 arr[7] = 0xa;
1464                         }
1465                 } else if (dsense) {
1466                         memset(arr, 0, 8);
1467                         arr[0] = 0x72;
1468                         arr[1] = sbuff[2];     /* sense key */
1469                         arr[2] = sbuff[12];    /* asc */
1470                         arr[3] = sbuff[13];    /* ascq */
1471                         len = 8;
1472                 } else {
1473                         memset(arr, 0, 18);
1474                         arr[0] = 0x70;
1475                         arr[2] = sbuff[1];     /* sense key */
1476                         arr[7] = 0xa;
1477                         arr[12] = sbuff[2];    /* asc */
1478                         arr[13] = sbuff[3];    /* ascq */
1479                 }
1480
1481         }
1482         mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
1483         return fill_from_dev_buffer(scp, arr, len);
1484 }
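
/*
 * Editor's note with an illustrative sketch (not compiled here): the
 * conversions above follow the two SPC sense layouts.  A consumer pulling
 * key/asc/ascq out of either format does the mirror image; in-kernel code
 * normally relies on scsi_normalize_sense() for this.  sense_kaq() is a
 * hypothetical name.
 *
 *	static void sense_kaq(const unsigned char *sb,
 *			      u8 *key, u8 *asc, u8 *ascq)
 *	{
 *		if ((sb[0] & 0x7f) >= 0x72) {	// descriptor format
 *			*key = sb[1] & 0xf;
 *			*asc = sb[2];
 *			*ascq = sb[3];
 *		} else {			// fixed format
 *			*key = sb[2] & 0xf;
 *			*asc = sb[12];
 *			*ascq = sb[13];
 *		}
 *	}
 */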
1485
1486 static int resp_start_stop(struct scsi_cmnd * scp,
1487                            struct sdebug_dev_info * devip)
1488 {
1489         unsigned char *cmd = scp->cmnd;
1490         int power_cond, start;
1491
1492         power_cond = (cmd[4] & 0xf0) >> 4;
1493         if (power_cond) {
1494                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1495                 return check_condition_result;
1496         }
1497         start = cmd[4] & 1;
1498         if (start == devip->stopped)
1499                 devip->stopped = !start;
1500         return 0;
1501 }
1502
1503 static sector_t get_sdebug_capacity(void)
1504 {
1505         if (scsi_debug_virtual_gb > 0)
1506                 return (sector_t)scsi_debug_virtual_gb *
1507                         (1073741824 / scsi_debug_sector_size);
1508         else
1509                 return sdebug_store_sectors;
1510 }
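
/*
 * Worked example (editor's note): with scsi_debug_virtual_gb=4 and
 * scsi_debug_sector_size=512 this reports 4 * (1073741824 / 512) =
 * 8388608 sectors (4 GiB), regardless of how much backing RAM
 * (sdebug_store_sectors) was actually allocated; accesses beyond the
 * store simply wrap around it (see do_device_access() below).
 */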
1511
1512 #define SDEBUG_READCAP_ARR_SZ 8
1513 static int resp_readcap(struct scsi_cmnd * scp,
1514                         struct sdebug_dev_info * devip)
1515 {
1516         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1517         unsigned int capac;
1518
1519         /* following just in case virtual_gb changed */
1520         sdebug_capacity = get_sdebug_capacity();
1521         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1522         if (sdebug_capacity < 0xffffffff) {
1523                 capac = (unsigned int)sdebug_capacity - 1;
1524                 arr[0] = (capac >> 24);
1525                 arr[1] = (capac >> 16) & 0xff;
1526                 arr[2] = (capac >> 8) & 0xff;
1527                 arr[3] = capac & 0xff;
1528         } else {
1529                 arr[0] = 0xff;
1530                 arr[1] = 0xff;
1531                 arr[2] = 0xff;
1532                 arr[3] = 0xff;
1533         }
1534         arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1535         arr[7] = scsi_debug_sector_size & 0xff;
1536         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1537 }
1538
1539 #define SDEBUG_READCAP16_ARR_SZ 32
1540 static int resp_readcap16(struct scsi_cmnd * scp,
1541                           struct sdebug_dev_info * devip)
1542 {
1543         unsigned char *cmd = scp->cmnd;
1544         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1545         unsigned long long capac;
1546         int k, alloc_len;
1547
1548         alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1549                      + cmd[13]);
1550         /* following just in case virtual_gb changed */
1551         sdebug_capacity = get_sdebug_capacity();
1552         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1553         capac = sdebug_capacity - 1;
1554         for (k = 0; k < 8; ++k, capac >>= 8)
1555                 arr[7 - k] = capac & 0xff;
1556         arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1557         arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1558         arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1559         arr[11] = scsi_debug_sector_size & 0xff;
1560         arr[13] = scsi_debug_physblk_exp & 0xf;
1561         arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1562
1563         if (scsi_debug_lbp()) {
1564                 arr[14] |= 0x80; /* LBPME */
1565                 if (scsi_debug_lbprz)
1566                         arr[14] |= 0x40; /* LBPRZ */
1567         }
1568
1569         arr[15] = scsi_debug_lowest_aligned & 0xff;
1570
1571         if (scsi_debug_dif) {
1572                 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1573                 arr[12] |= 1; /* PROT_EN */
1574         }
1575
1576         return fill_from_dev_buffer(scp, arr,
1577                                     min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1578 }
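
/*
 * Illustrative sketch (editor's example, not compiled here): decoding the
 * READ CAPACITY(16) parameter data built above; variable names are
 * hypothetical.
 *
 *	u64 last_lba = get_unaligned_be64(&arr[0]);	// highest valid LBA
 *	u32 lb_len   = get_unaligned_be32(&arr[8]);	// bytes per block
 *	bool prot_en = arr[12] & 0x1;
 *	int p_type   = (arr[12] >> 1) & 0x7;	// 0 => Type 1 when PROT_EN set
 *	int pb_exp   = arr[13] & 0xf;		// physical block exponent
 *	bool lbpme   = arr[14] & 0x80;		// logical block provisioning
 *	bool lbprz   = arr[14] & 0x40;		// unmapped blocks read as zero
 *	unsigned lowest_aligned = ((arr[14] & 0x3f) << 8) | arr[15];
 */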
1579
1580 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1581
1582 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1583                               struct sdebug_dev_info * devip)
1584 {
1585         unsigned char *cmd = scp->cmnd;
1586         unsigned char * arr;
1587         int host_no = devip->sdbg_host->shost->host_no;
1588         int n, ret, alen, rlen;
1589         int port_group_a, port_group_b, port_a, port_b;
1590
1591         alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1592                 + cmd[9]);
1593
1594         arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1595         if (! arr)
1596                 return DID_REQUEUE << 16;
1597         /*
1598          * EVPD page 0x88 states we have two ports, one
1599          * real and a fake port with no device connected.
1600          * So we create two port groups with one port each
1601          * and set the group with port B to unavailable.
1602          */
1603         port_a = 0x1; /* relative port A */
1604         port_b = 0x2; /* relative port B */
1605         port_group_a = (((host_no + 1) & 0x7f) << 8) +
1606             (devip->channel & 0x7f);
1607         port_group_b = (((host_no + 1) & 0x7f) << 8) +
1608             (devip->channel & 0x7f) + 0x80;
1609
1610         /*
1611          * The asymmetric access state is cycled according to the host_id.
1612          */
1613         n = 4;
1614         if (0 == scsi_debug_vpd_use_hostno) {
1615             arr[n++] = host_no % 3; /* Asymm access state */
1616             arr[n++] = 0x0F; /* claim: all states are supported */
1617         } else {
1618             arr[n++] = 0x0; /* Active/Optimized path */
1619             arr[n++] = 0x01; /* claim: only support active/optimized paths */
1620         }
1621         arr[n++] = (port_group_a >> 8) & 0xff;
1622         arr[n++] = port_group_a & 0xff;
1623         arr[n++] = 0;    /* Reserved */
1624         arr[n++] = 0;    /* Status code */
1625         arr[n++] = 0;    /* Vendor unique */
1626         arr[n++] = 0x1;  /* One port per group */
1627         arr[n++] = 0;    /* Reserved */
1628         arr[n++] = 0;    /* Reserved */
1629         arr[n++] = (port_a >> 8) & 0xff;
1630         arr[n++] = port_a & 0xff;
1631         arr[n++] = 3;    /* Port unavailable */
1632         arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1633         arr[n++] = (port_group_b >> 8) & 0xff;
1634         arr[n++] = port_group_b & 0xff;
1635         arr[n++] = 0;    /* Reserved */
1636         arr[n++] = 0;    /* Status code */
1637         arr[n++] = 0;    /* Vendor unique */
1638         arr[n++] = 0x1;  /* One port per group */
1639         arr[n++] = 0;    /* Reserved */
1640         arr[n++] = 0;    /* Reserved */
1641         arr[n++] = (port_b >> 8) & 0xff;
1642         arr[n++] = port_b & 0xff;
1643
1644         rlen = n - 4;
1645         arr[0] = (rlen >> 24) & 0xff;
1646         arr[1] = (rlen >> 16) & 0xff;
1647         arr[2] = (rlen >> 8) & 0xff;
1648         arr[3] = rlen & 0xff;
1649
1650         /*
1651          * Return the smallest value of either
1652          * - The allocated length
1653          * - The constructed command length
1654          * - The maximum array size
1655          */
1656         rlen = min(alen,n);
1657         ret = fill_from_dev_buffer(scp, arr,
1658                                    min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1659         kfree(arr);
1660         return ret;
1661 }
1662
1663 static int
1664 resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1665 {
1666         bool rctd;
1667         u8 reporting_opts, req_opcode, sdeb_i, supp;
1668         u16 req_sa, u;
1669         u32 alloc_len, a_len;
1670         int k, offset, len, errsts, count, bump, na;
1671         const struct opcode_info_t *oip;
1672         const struct opcode_info_t *r_oip;
1673         u8 *arr;
1674         u8 *cmd = scp->cmnd;
1675
1676         rctd = !!(cmd[2] & 0x80);
1677         reporting_opts = cmd[2] & 0x7;
1678         req_opcode = cmd[3];
1679         req_sa = get_unaligned_be16(cmd + 4);
1680         alloc_len = get_unaligned_be32(cmd + 6);
1681         if (alloc_len < 4 || alloc_len > 0xffff) {
1682                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1683                 return check_condition_result;
1684         }
1685         if (alloc_len > 8192)
1686                 a_len = 8192;
1687         else
1688                 a_len = alloc_len;
1689         arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1690         if (NULL == arr) {
1691                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
1692                                 INSUFF_RES_ASCQ);
1693                 return check_condition_result;
1694         }
1695         switch (reporting_opts) {
1696         case 0: /* all commands */
1697                 /* count number of commands */
1698                 for (count = 0, oip = opcode_info_arr;
1699                      oip->num_attached != 0xff; ++oip) {
1700                         if (F_INV_OP & oip->flags)
1701                                 continue;
1702                         count += (oip->num_attached + 1);
1703                 }
1704                 bump = rctd ? 20 : 8;
1705                 put_unaligned_be32(count * bump, arr);
1706                 for (offset = 4, oip = opcode_info_arr;
1707                      oip->num_attached != 0xff && offset < a_len; ++oip) {
1708                         if (F_INV_OP & oip->flags)
1709                                 continue;
1710                         na = oip->num_attached;
1711                         arr[offset] = oip->opcode;
1712                         put_unaligned_be16(oip->sa, arr + offset + 2);
1713                         if (rctd)
1714                                 arr[offset + 5] |= 0x2;
1715                         if (FF_SA & oip->flags)
1716                                 arr[offset + 5] |= 0x1;
1717                         put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
1718                         if (rctd)
1719                                 put_unaligned_be16(0xa, arr + offset + 8);
1720                         r_oip = oip;
1721                         for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
1722                                 if (F_INV_OP & oip->flags)
1723                                         continue;
1724                                 offset += bump;
1725                                 arr[offset] = oip->opcode;
1726                                 put_unaligned_be16(oip->sa, arr + offset + 2);
1727                                 if (rctd)
1728                                         arr[offset + 5] |= 0x2;
1729                                 if (FF_SA & oip->flags)
1730                                         arr[offset + 5] |= 0x1;
1731                                 put_unaligned_be16(oip->len_mask[0],
1732                                                    arr + offset + 6);
1733                                 if (rctd)
1734                                         put_unaligned_be16(0xa,
1735                                                            arr + offset + 8);
1736                         }
1737                         oip = r_oip;
1738                         offset += bump;
1739                 }
1740                 break;
1741         case 1: /* one command: opcode only */
1742         case 2: /* one command: opcode plus service action */
1743         case 3: /* one command: if sa==0 then opcode only else opcode+sa */
1744                 sdeb_i = opcode_ind_arr[req_opcode];
1745                 oip = &opcode_info_arr[sdeb_i];
1746                 if (F_INV_OP & oip->flags) {
1747                         supp = 1;
1748                         offset = 4;
1749                 } else {
1750                         if (1 == reporting_opts) {
1751                                 if (FF_SA & oip->flags) {
1752                                         mk_sense_invalid_fld(scp, SDEB_IN_CDB,
1753                                                              2, 2);
1754                                         kfree(arr);
1755                                         return check_condition_result;
1756                                 }
1757                                 req_sa = 0;
1758                         } else if (2 == reporting_opts &&
1759                                    0 == (FF_SA & oip->flags)) {
1760                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
1761                                 kfree(arr);     /* point at requested sa */
1762                                 return check_condition_result;
1763                         }
1764                         if (0 == (FF_SA & oip->flags) &&
1765                             req_opcode == oip->opcode)
1766                                 supp = 3;
1767                         else if (0 == (FF_SA & oip->flags)) {
1768                                 na = oip->num_attached;
1769                                 for (k = 0, oip = oip->arrp; k < na;
1770                                      ++k, ++oip) {
1771                                         if (req_opcode == oip->opcode)
1772                                                 break;
1773                                 }
1774                                 supp = (k >= na) ? 1 : 3;
1775                         } else if (req_sa != oip->sa) {
1776                                 na = oip->num_attached;
1777                                 for (k = 0, oip = oip->arrp; k < na;
1778                                      ++k, ++oip) {
1779                                         if (req_sa == oip->sa)
1780                                                 break;
1781                                 }
1782                                 supp = (k >= na) ? 1 : 3;
1783                         } else
1784                                 supp = 3;
1785                         if (3 == supp) {
1786                                 u = oip->len_mask[0];
1787                                 put_unaligned_be16(u, arr + 2);
1788                                 arr[4] = oip->opcode;
1789                                 for (k = 1; k < u; ++k)
1790                                         arr[4 + k] = (k < 16) ?
1791                                                  oip->len_mask[k] : 0xff;
1792                                 offset = 4 + u;
1793                         } else
1794                                 offset = 4;
1795                 }
1796                 arr[1] = (rctd ? 0x80 : 0) | supp;
1797                 if (rctd) {
1798                         put_unaligned_be16(0xa, arr + offset);
1799                         offset += 12;
1800                 }
1801                 break;
1802         default:
1803                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
1804                 kfree(arr);
1805                 return check_condition_result;
1806         }
1807         offset = (offset < a_len) ? offset : a_len;
1808         len = (offset < alloc_len) ? offset : alloc_len;
1809         errsts = fill_from_dev_buffer(scp, arr, len);
1810         kfree(arr);
1811         return errsts;
1812 }
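
/*
 * Illustrative sketch (editor's example, not compiled here): walking the
 * "all commands" parameter data built by the reporting_opts==0 case above.
 * Each descriptor is 8 bytes, or 20 when RCTD adds a timeouts descriptor.
 *
 *	u32 cdl = get_unaligned_be32(arr);	// bytes of descriptors
 *	bool rctd = false;			// true if RCTD was set in the CDB
 *	int bump = rctd ? 20 : 8;
 *	int off;
 *
 *	for (off = 4; off < (int)cdl + 4; off += bump) {
 *		u8 opcode     = arr[off];
 *		u16 sa        = get_unaligned_be16(arr + off + 2);
 *		bool servactv = arr[off + 5] & 0x1;	// sa field is valid
 *		bool ctdp     = arr[off + 5] & 0x2;	// timeouts follow
 *		u16 cdb_len   = get_unaligned_be16(arr + off + 6);
 *	}
 */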
1813
1814 static int
1815 resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1816 {
1817         bool repd;
1818         u32 alloc_len, len;
1819         u8 arr[16];
1820         u8 *cmd = scp->cmnd;
1821
1822         memset(arr, 0, sizeof(arr));
1823         repd = !!(cmd[2] & 0x80);
1824         alloc_len = get_unaligned_be32(cmd + 6);
1825         if (alloc_len < 4) {
1826                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1827                 return check_condition_result;
1828         }
1829         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
1830         arr[1] = 0x1;           /* ITNRS */
1831         if (repd) {
1832                 arr[3] = 0xc;
1833                 len = 16;
1834         } else
1835                 len = 4;
1836
1837         len = (len < alloc_len) ? len : alloc_len;
1838         return fill_from_dev_buffer(scp, arr, len);
1839 }
1840
1841 /* <<Following mode page info copied from ST318451LW>> */
1842
1843 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1844 {       /* Read-Write Error Recovery page for mode_sense */
1845         unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1846                                         5, 0, 0xff, 0xff};
1847
1848         memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1849         if (1 == pcontrol)
1850                 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1851         return sizeof(err_recov_pg);
1852 }
1853
1854 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1855 {       /* Disconnect-Reconnect page for mode_sense */
1856         unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1857                                          0, 0, 0, 0, 0, 0, 0, 0};
1858
1859         memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1860         if (1 == pcontrol)
1861                 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1862         return sizeof(disconnect_pg);
1863 }
1864
1865 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1866 {       /* Format device page for mode_sense */
1867         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1868                                      0, 0, 0, 0, 0, 0, 0, 0,
1869                                      0, 0, 0, 0, 0x40, 0, 0, 0};
1870
1871         memcpy(p, format_pg, sizeof(format_pg));
1872         p[10] = (sdebug_sectors_per >> 8) & 0xff;
1873         p[11] = sdebug_sectors_per & 0xff;
1874         p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1875         p[13] = scsi_debug_sector_size & 0xff;
1876         if (scsi_debug_removable)
1877                 p[20] |= 0x20; /* should agree with INQUIRY */
1878         if (1 == pcontrol)
1879                 memset(p + 2, 0, sizeof(format_pg) - 2);
1880         return sizeof(format_pg);
1881 }
1882
1883 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1884 {       /* Caching page for mode_sense */
1885         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
1886                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1887         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1888                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
1889
1890         if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
1891                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
1892         memcpy(p, caching_pg, sizeof(caching_pg));
1893         if (1 == pcontrol)
1894                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
1895         else if (2 == pcontrol)
1896                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
1897         return sizeof(caching_pg);
1898 }
1899
1900 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1901 {       /* Control mode page for mode_sense */
1902         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1903                                         0, 0, 0, 0};
1904         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1905                                      0, 0, 0x2, 0x4b};
1906
1907         if (scsi_debug_dsense)
1908                 ctrl_m_pg[2] |= 0x4;
1909         else
1910                 ctrl_m_pg[2] &= ~0x4;
1911
1912         if (scsi_debug_ato)
1913                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1914
1915         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1916         if (1 == pcontrol)
1917                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1918         else if (2 == pcontrol)
1919                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1920         return sizeof(ctrl_m_pg);
1921 }
1922
1923
1924 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1925 {       /* Informational Exceptions control mode page for mode_sense */
1926         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1927                                        0, 0, 0x0, 0x0};
1928         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1929                                       0, 0, 0x0, 0x0};
1930
1931         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1932         if (1 == pcontrol)
1933                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1934         else if (2 == pcontrol)
1935                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1936         return sizeof(iec_m_pg);
1937 }
1938
1939 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1940 {       /* SAS SSP mode page - short format for mode_sense */
1941         unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1942                 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1943
1944         memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1945         if (1 == pcontrol)
1946                 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1947         return sizeof(sas_sf_m_pg);
1948 }
1949
1950
1951 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1952                               int target_dev_id)
1953 {       /* SAS phy control and discover mode page for mode_sense */
1954         unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1955                     0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1956                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1957                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1958                     0x2, 0, 0, 0, 0, 0, 0, 0,
1959                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1960                     0, 0, 0, 0, 0, 0, 0, 0,
1961                     0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1962                     0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1963                     0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1964                     0x3, 0, 0, 0, 0, 0, 0, 0,
1965                     0x88, 0x99, 0, 0, 0, 0, 0, 0,
1966                     0, 0, 0, 0, 0, 0, 0, 0,
1967                 };
1968         int port_a, port_b;
1969
1970         port_a = target_dev_id + 1;
1971         port_b = port_a + 1;
1972         memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1973         p[20] = (port_a >> 24);
1974         p[21] = (port_a >> 16) & 0xff;
1975         p[22] = (port_a >> 8) & 0xff;
1976         p[23] = port_a & 0xff;
1977         p[48 + 20] = (port_b >> 24);
1978         p[48 + 21] = (port_b >> 16) & 0xff;
1979         p[48 + 22] = (port_b >> 8) & 0xff;
1980         p[48 + 23] = port_b & 0xff;
1981         if (1 == pcontrol)
1982                 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1983         return sizeof(sas_pcd_m_pg);
1984 }
1985
1986 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1987 {       /* SAS SSP shared protocol specific port mode subpage */
1988         unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1989                     0, 0, 0, 0, 0, 0, 0, 0,
1990                 };
1991
1992         memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1993         if (1 == pcontrol)
1994                 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1995         return sizeof(sas_sha_m_pg);
1996 }
1997
1998 #define SDEBUG_MAX_MSENSE_SZ 256
1999
2000 static int
2001 resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2002 {
2003         unsigned char dbd, llbaa;
2004         int pcontrol, pcode, subpcode, bd_len;
2005         unsigned char dev_spec;
2006         int k, alloc_len, msense_6, offset, len, target_dev_id;
2007         int target = scp->device->id;
2008         unsigned char * ap;
2009         unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2010         unsigned char *cmd = scp->cmnd;
2011
2012         dbd = !!(cmd[1] & 0x8);
2013         pcontrol = (cmd[2] & 0xc0) >> 6;
2014         pcode = cmd[2] & 0x3f;
2015         subpcode = cmd[3];
2016         msense_6 = (MODE_SENSE == cmd[0]);
2017         llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
2018         if ((0 == scsi_debug_ptype) && (0 == dbd))
2019                 bd_len = llbaa ? 16 : 8;
2020         else
2021                 bd_len = 0;
2022         alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
2023         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2024         if (0x3 == pcontrol) {  /* Saving values not supported */
2025                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2026                 return check_condition_result;
2027         }
2028         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2029                         (devip->target * 1000) - 3;
2030         /* set DPOFUA bit for disks */
2031         if (0 == scsi_debug_ptype)
2032                 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
2033         else
2034                 dev_spec = 0x0;
2035         if (msense_6) {
2036                 arr[2] = dev_spec;
2037                 arr[3] = bd_len;
2038                 offset = 4;
2039         } else {
2040                 arr[3] = dev_spec;
2041                 if (16 == bd_len)
2042                         arr[4] = 0x1;   /* set LONGLBA bit */
2043                 arr[7] = bd_len;        /* assume 255 or less */
2044                 offset = 8;
2045         }
2046         ap = arr + offset;
2047         if ((bd_len > 0) && (!sdebug_capacity))
2048                 sdebug_capacity = get_sdebug_capacity();
2049
2050         if (8 == bd_len) {
2051                 if (sdebug_capacity > 0xfffffffe) {
2052                         ap[0] = 0xff;
2053                         ap[1] = 0xff;
2054                         ap[2] = 0xff;
2055                         ap[3] = 0xff;
2056                 } else {
2057                         ap[0] = (sdebug_capacity >> 24) & 0xff;
2058                         ap[1] = (sdebug_capacity >> 16) & 0xff;
2059                         ap[2] = (sdebug_capacity >> 8) & 0xff;
2060                         ap[3] = sdebug_capacity & 0xff;
2061                 }
2062                 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
2063                 ap[7] = scsi_debug_sector_size & 0xff;
2064                 offset += bd_len;
2065                 ap = arr + offset;
2066         } else if (16 == bd_len) {
2067                 unsigned long long capac = sdebug_capacity;
2068
2069                 for (k = 0; k < 8; ++k, capac >>= 8)
2070                         ap[7 - k] = capac & 0xff;
2071                 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
2072                 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
2073                 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
2074                 ap[15] = scsi_debug_sector_size & 0xff;
2075                 offset += bd_len;
2076                 ap = arr + offset;
2077         }
2078
2079         if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2080                 /* TODO: Control Extension page */
2081                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2082                 return check_condition_result;
2083         }
2084         switch (pcode) {
2085         case 0x1:       /* Read-Write error recovery page, direct access */
2086                 len = resp_err_recov_pg(ap, pcontrol, target);
2087                 offset += len;
2088                 break;
2089         case 0x2:       /* Disconnect-Reconnect page, all devices */
2090                 len = resp_disconnect_pg(ap, pcontrol, target);
2091                 offset += len;
2092                 break;
2093         case 0x3:       /* Format device page, direct access */
2094                 len = resp_format_pg(ap, pcontrol, target);
2095                 offset += len;
2096                 break;
2097         case 0x8:       /* Caching page, direct access */
2098                 len = resp_caching_pg(ap, pcontrol, target);
2099                 offset += len;
2100                 break;
2101         case 0xa:       /* Control Mode page, all devices */
2102                 len = resp_ctrl_m_pg(ap, pcontrol, target);
2103                 offset += len;
2104                 break;
2105         case 0x19:      /* if spc==1 then sas phy, control+discover */
2106                 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2107                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2108                         return check_condition_result;
2109                 }
2110                 len = 0;
2111                 if ((0x0 == subpcode) || (0xff == subpcode))
2112                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2113                 if ((0x1 == subpcode) || (0xff == subpcode))
2114                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2115                                                   target_dev_id);
2116                 if ((0x2 == subpcode) || (0xff == subpcode))
2117                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2118                 offset += len;
2119                 break;
2120         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2121                 len = resp_iec_m_pg(ap, pcontrol, target);
2122                 offset += len;
2123                 break;
2124         case 0x3f:      /* Read all Mode pages */
2125                 if ((0 == subpcode) || (0xff == subpcode)) {
2126                         len = resp_err_recov_pg(ap, pcontrol, target);
2127                         len += resp_disconnect_pg(ap + len, pcontrol, target);
2128                         len += resp_format_pg(ap + len, pcontrol, target);
2129                         len += resp_caching_pg(ap + len, pcontrol, target);
2130                         len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2131                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2132                         if (0xff == subpcode) {
2133                                 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2134                                                   target, target_dev_id);
2135                                 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2136                         }
2137                         len += resp_iec_m_pg(ap + len, pcontrol, target);
2138                 } else {
2139                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2140                         return check_condition_result;
2141                 }
2142                 offset += len;
2143                 break;
2144         default:
2145                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2146                 return check_condition_result;
2147         }
2148         if (msense_6)
2149                 arr[0] = offset - 1;
2150         else {
2151                 arr[0] = ((offset - 2) >> 8) & 0xff;
2152                 arr[1] = (offset - 2) & 0xff;
2153         }
2154         return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
2155 }
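
/*
 * Editor's note with an illustrative sketch (not compiled here): the
 * response header layouts produced above.  For MODE SENSE(10):
 *
 *	u16 mode_data_len = get_unaligned_be16(&arr[0]);  // excludes these 2 bytes
 *	u8 dev_spec  = arr[3];		// 0x80 write-protect, 0x10 DPOFUA here
 *	bool longlba = arr[4] & 0x1;
 *	u16 bd_len   = get_unaligned_be16(&arr[6]);	// this code sets arr[7] only
 *	// mode pages start at arr[8 + bd_len]
 *
 * and for MODE SENSE(6) the data length is arr[0] (excluding itself), the
 * device-specific byte is arr[2], the block descriptor length is arr[3],
 * and the pages start at arr[4 + bd_len].
 */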
2156
2157 #define SDEBUG_MAX_MSELECT_SZ 512
2158
2159 static int
2160 resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2161 {
2162         int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2163         int param_len, res, mpage;
2164         unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2165         unsigned char *cmd = scp->cmnd;
2166         int mselect6 = (MODE_SELECT == cmd[0]);
2167
2168         memset(arr, 0, sizeof(arr));
2169         pf = cmd[1] & 0x10;
2170         sp = cmd[1] & 0x1;
2171         param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
2172         if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2173                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2174                 return check_condition_result;
2175         }
2176         res = fetch_to_dev_buffer(scp, arr, param_len);
2177         if (-1 == res)
2178                 return (DID_ERROR << 16);
2179         else if ((res < param_len) &&
2180                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2181                 sdev_printk(KERN_INFO, scp->device,
2182                             "%s: cdb indicated=%d, IO sent=%d bytes\n",
2183                             __func__, param_len, res);
2184         md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
2185         bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
2186         if (md_len > 2) {
2187                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2188                 return check_condition_result;
2189         }
2190         off = bd_len + (mselect6 ? 4 : 8);
2191         mpage = arr[off] & 0x3f;
2192         ps = !!(arr[off] & 0x80);
2193         if (ps) {
2194                 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2195                 return check_condition_result;
2196         }
2197         spf = !!(arr[off] & 0x40);
2198         pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
2199                        (arr[off + 1] + 2);
2200         if ((pg_len + off) > param_len) {
2201                 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2202                                 PARAMETER_LIST_LENGTH_ERR, 0);
2203                 return check_condition_result;
2204         }
2205         switch (mpage) {
2206         case 0x8:      /* Caching Mode page */
2207                 if (caching_pg[1] == arr[off + 1]) {
2208                         memcpy(caching_pg + 2, arr + off + 2,
2209                                sizeof(caching_pg) - 2);
2210                         goto set_mode_changed_ua;
2211                 }
2212                 break;
2213         case 0xa:      /* Control Mode page */
2214                 if (ctrl_m_pg[1] == arr[off + 1]) {
2215                         memcpy(ctrl_m_pg + 2, arr + off + 2,
2216                                sizeof(ctrl_m_pg) - 2);
2217                         scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
2218                         goto set_mode_changed_ua;
2219                 }
2220                 break;
2221         case 0x1c:      /* Informational Exceptions Mode page */
2222                 if (iec_m_pg[1] == arr[off + 1]) {
2223                         memcpy(iec_m_pg + 2, arr + off + 2,
2224                                sizeof(iec_m_pg) - 2);
2225                         goto set_mode_changed_ua;
2226                 }
2227                 break;
2228         default:
2229                 break;
2230         }
2231         mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2232         return check_condition_result;
2233 set_mode_changed_ua:
2234         set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2235         return 0;
2236 }
2237
2238 static int resp_temp_l_pg(unsigned char * arr)
2239 {
2240         unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2241                                      0x0, 0x1, 0x3, 0x2, 0x0, 65,
2242                 };
2243
2244         memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2245         return sizeof(temp_l_pg);
2246 }
2247
2248 static int resp_ie_l_pg(unsigned char * arr)
2249 {
2250         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2251                 };
2252
2253         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2254         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2255                 arr[4] = THRESHOLD_EXCEEDED;
2256                 arr[5] = 0xff;
2257         }
2258         return sizeof(ie_l_pg);
2259 }
2260
2261 #define SDEBUG_MAX_LSENSE_SZ 512
2262
2263 static int resp_log_sense(struct scsi_cmnd * scp,
2264                           struct sdebug_dev_info * devip)
2265 {
2266         int ppc, sp, pcontrol, pcode, subpcode, alloc_len, len, n;
2267         unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2268         unsigned char *cmd = scp->cmnd;
2269
2270         memset(arr, 0, sizeof(arr));
2271         ppc = cmd[1] & 0x2;
2272         sp = cmd[1] & 0x1;
2273         if (ppc || sp) {
2274                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2275                 return check_condition_result;
2276         }
2277         pcontrol = (cmd[2] & 0xc0) >> 6;
2278         pcode = cmd[2] & 0x3f;
2279         subpcode = cmd[3] & 0xff;
2280         alloc_len = (cmd[7] << 8) + cmd[8];
2281         arr[0] = pcode;
2282         if (0 == subpcode) {
2283                 switch (pcode) {
2284                 case 0x0:       /* Supported log pages log page */
2285                         n = 4;
2286                         arr[n++] = 0x0;         /* this page */
2287                         arr[n++] = 0xd;         /* Temperature */
2288                         arr[n++] = 0x2f;        /* Informational exceptions */
2289                         arr[3] = n - 4;
2290                         break;
2291                 case 0xd:       /* Temperature log page */
2292                         arr[3] = resp_temp_l_pg(arr + 4);
2293                         break;
2294                 case 0x2f:      /* Informational exceptions log page */
2295                         arr[3] = resp_ie_l_pg(arr + 4);
2296                         break;
2297                 default:
2298                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2299                         return check_condition_result;
2300                 }
2301         } else if (0xff == subpcode) {
2302                 arr[0] |= 0x40;
2303                 arr[1] = subpcode;
2304                 switch (pcode) {
2305                 case 0x0:       /* Supported log pages and subpages log page */
2306                         n = 4;
2307                         arr[n++] = 0x0;
2308                         arr[n++] = 0x0;         /* 0,0 page */
2309                         arr[n++] = 0x0;
2310                         arr[n++] = 0xff;        /* this page */
2311                         arr[n++] = 0xd;
2312                         arr[n++] = 0x0;         /* Temperature */
2313                         arr[n++] = 0x2f;
2314                         arr[n++] = 0x0; /* Informational exceptions */
2315                         arr[3] = n - 4;
2316                         break;
2317                 case 0xd:       /* Temperature subpages */
2318                         n = 4;
2319                         arr[n++] = 0xd;
2320                         arr[n++] = 0x0;         /* Temperature */
2321                         arr[3] = n - 4;
2322                         break;
2323                 case 0x2f:      /* Informational exceptions subpages */
2324                         n = 4;
2325                         arr[n++] = 0x2f;
2326                         arr[n++] = 0x0;         /* Informational exceptions */
2327                         arr[3] = n - 4;
2328                         break;
2329                 default:
2330                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2331                         return check_condition_result;
2332                 }
2333         } else {
2334                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2335                 return check_condition_result;
2336         }
2337         len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
2338         return fill_from_dev_buffer(scp, arr,
2339                     min(len, SDEBUG_MAX_LSENSE_SZ));
2340 }
2341
2342 static int check_device_access_params(struct scsi_cmnd *scp,
2343                                       unsigned long long lba, unsigned int num)
2344 {
2345         if (lba + num > sdebug_capacity) {
2346                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2347                 return check_condition_result;
2348         }
2349         /* transfer length excessive (tie in to block limits VPD page) */
2350         if (num > sdebug_store_sectors) {
2351                 /* needs work to find which cdb byte 'num' comes from */
2352                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2353                 return check_condition_result;
2354         }
2355         return 0;
2356 }
2357
2358 /* Returns number of bytes copied or -1 if error. */
2359 static int
2360 do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
2361 {
2362         int ret;
2363         u64 block, rest = 0;
2364         struct scsi_data_buffer *sdb;
2365         enum dma_data_direction dir;
2366         size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
2367                        off_t);
2368
2369         if (do_write) {
2370                 sdb = scsi_out(scmd);
2371                 dir = DMA_TO_DEVICE;
2372                 func = sg_pcopy_to_buffer;
2373         } else {
2374                 sdb = scsi_in(scmd);
2375                 dir = DMA_FROM_DEVICE;
2376                 func = sg_pcopy_from_buffer;
2377         }
2378
2379         if (!sdb->length)
2380                 return 0;
2381         if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
2382                 return -1;
2383
2384         block = do_div(lba, sdebug_store_sectors);
2385         if (block + num > sdebug_store_sectors)
2386                 rest = block + num - sdebug_store_sectors;
2387
2388         ret = func(sdb->table.sgl, sdb->table.nents,
2389                    fake_storep + (block * scsi_debug_sector_size),
2390                    (num - rest) * scsi_debug_sector_size, 0);
2391         if (ret != (num - rest) * scsi_debug_sector_size)
2392                 return ret;
2393
2394         if (rest) {
2395                 ret += func(sdb->table.sgl, sdb->table.nents,
2396                             fake_storep, rest * scsi_debug_sector_size,
2397                             (num - rest) * scsi_debug_sector_size);
2398         }
2399
2400         return ret;
2401 }
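
/*
 * Worked example of the wrap handling above (editor's note): with
 * sdebug_store_sectors = 0x400, a request for lba = 0x7fe, num = 4 gives
 * block = 0x7fe % 0x400 = 0x3fe, so block + num = 0x402 overruns the store
 * and rest = 2.  The first copy then moves 2 sectors ending at the top of
 * fake_storep and the second moves the remaining 2 sectors from its start,
 * which is how a small RAM store backs a larger virtual_gb capacity.
 */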
2402
2403 /* If fake_store(lba,num) compares equal to arr(num), then copy top half of
2404  * arr into fake_store(lba,num) and return true. If comparison fails then
2405  * return false. */
2406 static bool
2407 comp_write_worker(u64 lba, u32 num, const u8 *arr)
2408 {
2409         bool res;
2410         u64 block, rest = 0;
2411         u32 store_blks = sdebug_store_sectors;
2412         u32 lb_size = scsi_debug_sector_size;
2413
2414         block = do_div(lba, store_blks);
2415         if (block + num > store_blks)
2416                 rest = block + num - store_blks;
2417
2418         res = !memcmp(fake_storep + (block * lb_size), arr,
2419                       (num - rest) * lb_size);
2420         if (!res)
2421                 return res;
2422         if (rest)
2423                 res = !memcmp(fake_storep, arr + ((num - rest) * lb_size),
2424                               rest * lb_size);
2425         if (!res)
2426                 return res;
2427         arr += num * lb_size;
2428         memcpy(fake_storep + (block * lb_size), arr, (num - rest) * lb_size);
2429         if (rest)
2430                 memcpy(fake_storep, arr + ((num - rest) * lb_size),
2431                        rest * lb_size);
2432         return res;
2433 }
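
/*
 * Editor's note: for COMPARE AND WRITE the caller's data-out buffer holds
 * 2 * num blocks.  The first half is compared against the store (including
 * any tail that wraps past the end) and only on a full match is the second
 * half, reached via 'arr += num * lb_size' above, written in its place; so
 * with num = 1 and a 512-byte sector the initiator transfers 1024 bytes.
 */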
2434
2435 static __be16 dif_compute_csum(const void *buf, int len)
2436 {
2437         __be16 csum;
2438
2439         if (scsi_debug_guard)
2440                 csum = (__force __be16)ip_compute_csum(buf, len);
2441         else
2442                 csum = cpu_to_be16(crc_t10dif(buf, len));
2443
2444         return csum;
2445 }
2446
2447 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2448                       sector_t sector, u32 ei_lba)
2449 {
2450         __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
2451
2452         if (sdt->guard_tag != csum) {
2453                 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
2454                         __func__,
2455                         (unsigned long)sector,
2456                         be16_to_cpu(sdt->guard_tag),
2457                         be16_to_cpu(csum));
2458                 return 0x01;
2459         }
2460         if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
2461             be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2462                 pr_err("%s: REF check failed on sector %lu\n",
2463                         __func__, (unsigned long)sector);
2464                 return 0x03;
2465         }
2466         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2467             be32_to_cpu(sdt->ref_tag) != ei_lba) {
2468                 pr_err("%s: REF check failed on sector %lu\n",
2469                         __func__, (unsigned long)sector);
2470                 return 0x03;
2471         }
2472         return 0;
2473 }
2474
2475 static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector,
2476                           unsigned int sectors, bool read)
2477 {
2478         size_t resid;
2479         void *paddr;
2480         const void *dif_store_end = dif_storep + sdebug_store_sectors;
2481         struct sg_mapping_iter miter;
2482
2483         /* Bytes of protection data to copy into sgl */
2484         resid = sectors * sizeof(*dif_storep);
2485
2486         sg_miter_start(&miter, scsi_prot_sglist(SCpnt),
2487                         scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC |
2488                         (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
2489
2490         while (sg_miter_next(&miter) && resid > 0) {
2491                 size_t len = min(miter.length, resid);
2492                 void *start = dif_store(sector);
2493                 size_t rest = 0;
2494
2495                 if (dif_store_end < start + len)
2496                         rest = start + len - dif_store_end;
2497
2498                 paddr = miter.addr;
2499
2500                 if (read)
2501                         memcpy(paddr, start, len - rest);
2502                 else
2503                         memcpy(start, paddr, len - rest);
2504
2505                 if (rest) {
2506                         if (read)
2507                                 memcpy(paddr + len - rest, dif_storep, rest);
2508                         else
2509                                 memcpy(dif_storep, paddr + len - rest, rest);
2510                 }
2511
2512                 sector += len / sizeof(*dif_storep);
2513                 resid -= len;
2514         }
2515         sg_miter_stop(&miter);
2516 }
2517
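     /* Protected READ: check each stored tuple against the fake store,
      * skipping tuples whose application tag holds the 0xffff escape value,
      * then copy the protection data out to the command. */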
2518 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2519                             unsigned int sectors, u32 ei_lba)
2520 {
2521         unsigned int i;
2522         struct sd_dif_tuple *sdt;
2523         sector_t sector;
2524
2525         for (i = 0; i < sectors; i++, ei_lba++) {
2526                 int ret;
2527
2528                 sector = start_sec + i;
2529                 sdt = dif_store(sector);
2530
2531                 if (sdt->app_tag == cpu_to_be16(0xffff))
2532                         continue;
2533
2534                 ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
2535                 if (ret) {
2536                         dif_errors++;
2537                         return ret;
2538                 }
2539         }
2540
2541         dif_copy_prot(SCpnt, start_sec, sectors, true);
2542         dix_reads++;
2543
2544         return 0;
2545 }
2546
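     /* READ(6/10/12/16/32) and the read half of XDWRITEREAD(10): decode the
      * cdb, honour any configured error injection, range check the request,
      * run DIX verification if enabled and copy data from the fake store
      * into the data-in buffer. */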
2547 static int
2548 resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2549 {
2550         u8 *cmd = scp->cmnd;
2551         u64 lba;
2552         u32 num;
2553         u32 ei_lba;
2554         unsigned long iflags;
2555         int ret;
2556         bool check_prot;
2557
2558         switch (cmd[0]) {
2559         case READ_16:
2560                 ei_lba = 0;
2561                 lba = get_unaligned_be64(cmd + 2);
2562                 num = get_unaligned_be32(cmd + 10);
2563                 check_prot = true;
2564                 break;
2565         case READ_10:
2566                 ei_lba = 0;
2567                 lba = get_unaligned_be32(cmd + 2);
2568                 num = get_unaligned_be16(cmd + 7);
2569                 check_prot = true;
2570                 break;
2571         case READ_6:
2572                 ei_lba = 0;
2573                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2574                       (u32)(cmd[1] & 0x1f) << 16;
2575                 num = (0 == cmd[4]) ? 256 : cmd[4];
2576                 check_prot = true;
2577                 break;
2578         case READ_12:
2579                 ei_lba = 0;
2580                 lba = get_unaligned_be32(cmd + 2);
2581                 num = get_unaligned_be32(cmd + 6);
2582                 check_prot = true;
2583                 break;
2584         case XDWRITEREAD_10:
2585                 ei_lba = 0;
2586                 lba = get_unaligned_be32(cmd + 2);
2587                 num = get_unaligned_be16(cmd + 7);
2588                 check_prot = false;
2589                 break;
2590         default:        /* assume READ(32) */
2591                 lba = get_unaligned_be64(cmd + 12);
2592                 ei_lba = get_unaligned_be32(cmd + 20);
2593                 num = get_unaligned_be32(cmd + 28);
2594                 check_prot = false;
2595                 break;
2596         }
2597         if (check_prot) {
2598                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2599                     (cmd[1] & 0xe0)) {
2600                         mk_sense_invalid_opcode(scp);
2601                         return check_condition_result;
2602                 }
2603                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2604                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2605                     (cmd[1] & 0xe0) == 0)
2606                         sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2607                                     "to DIF device\n");
2608         }
2609         if (sdebug_any_injecting_opt) {
2610                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2611
2612                 if (ep->inj_short)
2613                         num /= 2;
2614         }
2615
2616         /* inline check_device_access_params() */
2617         if (lba + num > sdebug_capacity) {
2618                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2619                 return check_condition_result;
2620         }
2621         /* transfer length excessive (tie in to block limits VPD page) */
2622         if (num > sdebug_store_sectors) {
2623                 /* needs work to find which cdb byte 'num' comes from */
2624                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2625                 return check_condition_result;
2626         }
2627
2628         if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
2629             (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
2630             ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
2631                 /* claim unrecoverable read error */
2632                 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
2633                 /* set info field and valid bit for fixed descriptor */
2634                 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
2635                         scp->sense_buffer[0] |= 0x80;   /* Valid bit */
2636                         ret = (lba < OPT_MEDIUM_ERR_ADDR)
2637                               ? OPT_MEDIUM_ERR_ADDR : (int)lba;
2638                         put_unaligned_be32(ret, scp->sense_buffer + 3);
2639                 }
2640                 scsi_set_resid(scp, scsi_bufflen(scp));
2641                 return check_condition_result;
2642         }
2643
2644         read_lock_irqsave(&atomic_rw, iflags);
2645
2646         /* DIX + T10 DIF */
2647         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2648                 int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
2649
2650                 if (prot_ret) {
2651                         read_unlock_irqrestore(&atomic_rw, iflags);
2652                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret);
2653                         return illegal_condition_result;
2654                 }
2655         }
2656
2657         ret = do_device_access(scp, lba, num, false);
2658         read_unlock_irqrestore(&atomic_rw, iflags);
2659         if (ret == -1)
2660                 return DID_ERROR << 16;
2661
2662         scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
2663
2664         if (sdebug_any_injecting_opt) {
2665                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2666
2667                 if (ep->inj_recovered) {
2668                         mk_sense_buffer(scp, RECOVERED_ERROR,
2669                                         THRESHOLD_EXCEEDED, 0);
2670                         return check_condition_result;
2671                 } else if (ep->inj_transport) {
2672                         mk_sense_buffer(scp, ABORTED_COMMAND,
2673                                         TRANSPORT_PROBLEM, ACK_NAK_TO);
2674                         return check_condition_result;
2675                 } else if (ep->inj_dif) {
2676                         /* Logical block guard check failed */
2677                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2678                         return illegal_condition_result;
2679                 } else if (ep->inj_dix) {
2680                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2681                         return illegal_condition_result;
2682                 }
2683         }
2684         return 0;
2685 }
2686
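     /* Hex/ASCII dump of one logical block; used when a protection check
      * fails during a write. */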
2687 static void dump_sector(unsigned char *buf, int len)
2688 {
2689         int i, j, n;
2690
2691         pr_err(">>> Sector Dump <<<\n");
2692         for (i = 0 ; i < len ; i += 16) {
2693                 char b[128];
2694
2695                 for (j = 0, n = 0; j < 16; j++) {
2696                         unsigned char c = buf[i+j];
2697
2698                         if (c >= 0x20 && c < 0x7e)
2699                                 n += scnprintf(b + n, sizeof(b) - n,
2700                                                " %c ", buf[i+j]);
2701                         else
2702                                 n += scnprintf(b + n, sizeof(b) - n,
2703                                                "%02x ", buf[i+j]);
2704                 }
2705                 pr_err("%04d: %s\n", i, b);
2706         }
2707 }
2708
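     /* Protected WRITE: walk the protection and data scatter-gather lists in
      * step, verifying each incoming tuple against its data block, then copy
      * the protection data into dif_storep. */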
2709 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2710                              unsigned int sectors, u32 ei_lba)
2711 {
2712         int ret;
2713         struct sd_dif_tuple *sdt;
2714         void *daddr;
2715         sector_t sector = start_sec;
2716         int ppage_offset;
2717         int dpage_offset;
2718         struct sg_mapping_iter diter;
2719         struct sg_mapping_iter piter;
2720
2721         BUG_ON(scsi_sg_count(SCpnt) == 0);
2722         BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
2723
2724         sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
2725                         scsi_prot_sg_count(SCpnt),
2726                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2727         sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
2728                         SG_MITER_ATOMIC | SG_MITER_FROM_SG);
2729
2730         /* For each protection page */
2731         while (sg_miter_next(&piter)) {
2732                 dpage_offset = 0;
2733                 if (WARN_ON(!sg_miter_next(&diter))) {
2734                         ret = 0x01;
2735                         goto out;
2736                 }
2737
2738                 for (ppage_offset = 0; ppage_offset < piter.length;
2739                      ppage_offset += sizeof(struct sd_dif_tuple)) {
2740                         /* If we're at the end of the current
2741                          * data page, advance to the next one
2742                          */
2743                         if (dpage_offset >= diter.length) {
2744                                 if (WARN_ON(!sg_miter_next(&diter))) {
2745                                         ret = 0x01;
2746                                         goto out;
2747                                 }
2748                                 dpage_offset = 0;
2749                         }
2750
2751                         sdt = piter.addr + ppage_offset;
2752                         daddr = diter.addr + dpage_offset;
2753
2754                         ret = dif_verify(sdt, daddr, sector, ei_lba);
2755                         if (ret) {
2756                                 dump_sector(daddr, scsi_debug_sector_size);
2757                                 goto out;
2758                         }
2759
2760                         sector++;
2761                         ei_lba++;
2762                         dpage_offset += scsi_debug_sector_size;
2763                 }
2764                 diter.consumed = dpage_offset;
2765                 sg_miter_stop(&diter);
2766         }
2767         sg_miter_stop(&piter);
2768
2769         dif_copy_prot(SCpnt, start_sec, sectors, false);
2770         dix_writes++;
2771
2772         return 0;
2773
2774 out:
2775         dif_errors++;
2776         sg_miter_stop(&diter);
2777         sg_miter_stop(&piter);
2778         return ret;
2779 }
2780
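     /* Translate between LBAs and bit positions in the provisioning bitmap
      * (map_storep): one bit covers scsi_debug_unmap_granularity blocks,
      * offset by scsi_debug_unmap_alignment when that is non-zero. */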
2781 static unsigned long lba_to_map_index(sector_t lba)
2782 {
2783         if (scsi_debug_unmap_alignment) {
2784                 lba += scsi_debug_unmap_granularity -
2785                         scsi_debug_unmap_alignment;
2786         }
2787         do_div(lba, scsi_debug_unmap_granularity);
2788
2789         return lba;
2790 }
2791
2792 static sector_t map_index_to_lba(unsigned long index)
2793 {
2794         sector_t lba = index * scsi_debug_unmap_granularity;
2795
2796         if (scsi_debug_unmap_alignment) {
2797                 lba -= scsi_debug_unmap_granularity -
2798                         scsi_debug_unmap_alignment;
2799         }
2800
2801         return lba;
2802 }
2803
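     /* Report whether the block at lba is mapped and, via *num, how many
      * following blocks (capped at the end of the store) share that state. */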
2804 static unsigned int map_state(sector_t lba, unsigned int *num)
2805 {
2806         sector_t end;
2807         unsigned int mapped;
2808         unsigned long index;
2809         unsigned long next;
2810
2811         index = lba_to_map_index(lba);
2812         mapped = test_bit(index, map_storep);
2813
2814         if (mapped)
2815                 next = find_next_zero_bit(map_storep, map_size, index);
2816         else
2817                 next = find_next_bit(map_storep, map_size, index);
2818
2819         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
2820         *num = end - lba;
2821
2822         return mapped;
2823 }
2824
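     /* Mark every bitmap granule touched by [lba, lba + len) as mapped. */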
2825 static void map_region(sector_t lba, unsigned int len)
2826 {
2827         sector_t end = lba + len;
2828
2829         while (lba < end) {
2830                 unsigned long index = lba_to_map_index(lba);
2831
2832                 if (index < map_size)
2833                         set_bit(index, map_storep);
2834
2835                 lba = map_index_to_lba(index + 1);
2836         }
2837 }
2838
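     /* Clear bitmap granules wholly contained in [lba, lba + len); if lbprz
      * is set the backing data is zeroed, and any protection tuples are
      * reset to the 0xff escape pattern. */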
2839 static void unmap_region(sector_t lba, unsigned int len)
2840 {
2841         sector_t end = lba + len;
2842
2843         while (lba < end) {
2844                 unsigned long index = lba_to_map_index(lba);
2845
2846                 if (lba == map_index_to_lba(index) &&
2847                     lba + scsi_debug_unmap_granularity <= end &&
2848                     index < map_size) {
2849                         clear_bit(index, map_storep);
2850                         if (scsi_debug_lbprz) {
2851                                 memset(fake_storep +
2852                                        lba * scsi_debug_sector_size, 0,
2853                                        scsi_debug_sector_size *
2854                                        scsi_debug_unmap_granularity);
2855                         }
2856                         if (dif_storep) {
2857                                 memset(dif_storep + lba, 0xff,
2858                                        sizeof(*dif_storep) *
2859                                        scsi_debug_unmap_granularity);
2860                         }
2861                 }
2862                 lba = map_index_to_lba(index + 1);
2863         }
2864 }
2865
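     /* WRITE(6/10/12/16/32) and the write half of XDWRITEREAD(10): decode
      * the cdb, range check, run DIX verification when protection data is
      * supplied, copy data-out into the fake store and update the
      * provisioning map. */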
2866 static int
2867 resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2868 {
2869         u8 *cmd = scp->cmnd;
2870         u64 lba;
2871         u32 num;
2872         u32 ei_lba;
2873         unsigned long iflags;
2874         int ret;
2875         bool check_prot;
2876
2877         switch (cmd[0]) {
2878         case WRITE_16:
2879                 ei_lba = 0;
2880                 lba = get_unaligned_be64(cmd + 2);
2881                 num = get_unaligned_be32(cmd + 10);
2882                 check_prot = true;
2883                 break;
2884         case WRITE_10:
2885                 ei_lba = 0;
2886                 lba = get_unaligned_be32(cmd + 2);
2887                 num = get_unaligned_be16(cmd + 7);
2888                 check_prot = true;
2889                 break;
2890         case WRITE_6:
2891                 ei_lba = 0;
2892                 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
2893                       (u32)(cmd[1] & 0x1f) << 16;
2894                 num = (0 == cmd[4]) ? 256 : cmd[4];
2895                 check_prot = true;
2896                 break;
2897         case WRITE_12:
2898                 ei_lba = 0;
2899                 lba = get_unaligned_be32(cmd + 2);
2900                 num = get_unaligned_be32(cmd + 6);
2901                 check_prot = true;
2902                 break;
2903         case 0x53:      /* XDWRITEREAD(10) */
2904                 ei_lba = 0;
2905                 lba = get_unaligned_be32(cmd + 2);
2906                 num = get_unaligned_be16(cmd + 7);
2907                 check_prot = false;
2908                 break;
2909         default:        /* assume WRITE(32) */
2910                 lba = get_unaligned_be64(cmd + 12);
2911                 ei_lba = get_unaligned_be32(cmd + 20);
2912                 num = get_unaligned_be32(cmd + 28);
2913                 check_prot = false;
2914                 break;
2915         }
2916         if (check_prot) {
2917                 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
2918                     (cmd[1] & 0xe0)) {
2919                         mk_sense_invalid_opcode(scp);
2920                         return check_condition_result;
2921                 }
2922                 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
2923                      scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
2924                     (cmd[1] & 0xe0) == 0)
2925                         sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2926                                     "to DIF device\n");
2927         }
2928
2929         /* inline check_device_access_params() */
2930         if (lba + num > sdebug_capacity) {
2931                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2932                 return check_condition_result;
2933         }
2934         /* transfer length excessive (tie in to block limits VPD page) */
2935         if (num > sdebug_store_sectors) {
2936                 /* needs work to find which cdb byte 'num' comes from */
2937                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2938                 return check_condition_result;
2939         }
2940
2941         write_lock_irqsave(&atomic_rw, iflags);
2942
2943         /* DIX + T10 DIF */
2944         if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
2945                 int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
2946
2947                 if (prot_ret) {
2948                         write_unlock_irqrestore(&atomic_rw, iflags);
2949                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret);
2950                         return illegal_condition_result;
2951                 }
2952         }
2953
2954         ret = do_device_access(scp, lba, num, true);
2955         if (scsi_debug_lbp())
2956                 map_region(lba, num);
2957         write_unlock_irqrestore(&atomic_rw, iflags);
2958         if (-1 == ret)
2959                 return (DID_ERROR << 16);
2960         else if ((ret < (num * scsi_debug_sector_size)) &&
2961                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2962                 sdev_printk(KERN_INFO, scp->device,
2963                             "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
2964                             my_name, num * scsi_debug_sector_size, ret);
2965
2966         if (sdebug_any_injecting_opt) {
2967                 struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
2968
2969                 if (ep->inj_recovered) {
2970                         mk_sense_buffer(scp, RECOVERED_ERROR,
2971                                         THRESHOLD_EXCEEDED, 0);
2972                         return check_condition_result;
2973                 } else if (ep->inj_dif) {
2974                         /* Logical block guard check failed */
2975                         mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
2976                         return illegal_condition_result;
2977                 } else if (ep->inj_dix) {
2978                         mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
2979                         return illegal_condition_result;
2980                 }
2981         }
2982         return 0;
2983 }
2984
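     /* Common WRITE SAME handler: either unmap the range, or replicate a
      * single logical block (zeroes when ndob is set, otherwise the block
      * fetched from data-out) across all num blocks. */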
2985 static int
2986 resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
2987                 bool unmap, bool ndob)
2988 {
2989         unsigned long iflags;
2990         unsigned long long i;
2991         int ret;
2992
2993         ret = check_device_access_params(scp, lba, num);
2994         if (ret)
2995                 return ret;
2996
2997         write_lock_irqsave(&atomic_rw, iflags);
2998
2999         if (unmap && scsi_debug_lbp()) {
3000                 unmap_region(lba, num);
3001                 goto out;
3002         }
3003
3004         /* if ndob then zero 1 logical block, else fetch 1 logical block */
3005         if (ndob) {
3006                 memset(fake_storep + (lba * scsi_debug_sector_size), 0,
3007                        scsi_debug_sector_size);
3008                 ret = 0;
3009         } else
3010                 ret = fetch_to_dev_buffer(scp, fake_storep +
3011                                                (lba * scsi_debug_sector_size),
3012                                           scsi_debug_sector_size);
3013
3014         if (-1 == ret) {
3015                 write_unlock_irqrestore(&atomic_rw, iflags);
3016                 return (DID_ERROR << 16);
3017         } else if ((ret < (num * scsi_debug_sector_size)) &&
3018                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3019                 sdev_printk(KERN_INFO, scp->device,
3020                             "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
3021                             my_name, "write same",
3022                             num * scsi_debug_sector_size, ret);
3023
3024         /* Copy first sector to remaining blocks */
3025         for (i = 1 ; i < num ; i++)
3026                 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
3027                        fake_storep + (lba * scsi_debug_sector_size),
3028                        scsi_debug_sector_size);
3029
3030         if (scsi_debug_lbp())
3031                 map_region(lba, num);
3032 out:
3033         write_unlock_irqrestore(&atomic_rw, iflags);
3034
3035         return 0;
3036 }
3037
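     /* WRITE SAME(10) and WRITE SAME(16) cdb decoders; both feed
      * resp_write_same() above, with only the 16 byte variant supporting
      * the NDOB bit. */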
3038 static int
3039 resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3040 {
3041         u8 *cmd = scp->cmnd;
3042         u32 lba;
3043         u16 num;
3044         u32 ei_lba = 0;
3045         bool unmap = false;
3046
3047         if (cmd[1] & 0x8) {
3048                 if (scsi_debug_lbpws10 == 0) {
3049                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3050                         return check_condition_result;
3051                 } else
3052                         unmap = true;
3053         }
3054         lba = get_unaligned_be32(cmd + 2);
3055         num = get_unaligned_be16(cmd + 7);
3056         if (num > scsi_debug_write_same_length) {
3057                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3058                 return check_condition_result;
3059         }
3060         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3061 }
3062
3063 static int
3064 resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3065 {
3066         u8 *cmd = scp->cmnd;
3067         u64 lba;
3068         u32 num;
3069         u32 ei_lba = 0;
3070         bool unmap = false;
3071         bool ndob = false;
3072
3073         if (cmd[1] & 0x8) {     /* UNMAP */
3074                 if (scsi_debug_lbpws == 0) {
3075                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3076                         return check_condition_result;
3077                 } else
3078                         unmap = true;
3079         }
3080         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
3081                 ndob = true;
3082         lba = get_unaligned_be64(cmd + 2);
3083         num = get_unaligned_be32(cmd + 10);
3084         if (num > scsi_debug_write_same_length) {
3085                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3086                 return check_condition_result;
3087         }
3088         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3089 }
3090
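     /* WRITE BUFFER: the microcode download modes transfer nothing here;
      * they only raise unit attention conditions, either on this device or
      * on the other devices at the same target, depending on the mode. */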
3091 /* Note the mode field is in the same position as the (lower) service action
3092  * field. For the Report supported operation codes command, SPC-4 suggests
3093  * each mode of this command should be reported separately; that is left for
3094  * a future change. */
3094 static int
3095 resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3096 {
3097         u8 *cmd = scp->cmnd;
3098         struct scsi_device *sdp = scp->device;
3099         struct sdebug_dev_info *dp;
3100         u8 mode;
3101
3102         mode = cmd[1] & 0x1f;
3103         switch (mode) {
3104         case 0x4:       /* download microcode (MC) and activate (ACT) */
3105                 /* set UAs on this device only */
3106                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3107                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3108                 break;
3109         case 0x5:       /* download MC, save and ACT */
3110                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3111                 break;
3112         case 0x6:       /* download MC with offsets and ACT */
3113                 /* set UAs on most devices (LUs) in this target */
3114                 list_for_each_entry(dp,
3115                                     &devip->sdbg_host->dev_info_list,
3116                                     dev_list)
3117                         if (dp->target == sdp->id) {
3118                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3119                                 if (devip != dp)
3120                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3121                                                 dp->uas_bm);
3122                         }
3123                 break;
3124         case 0x7:       /* download MC with offsets, save, and ACT */
3125                 /* set UA on all devices (LUs) in this target */
3126                 list_for_each_entry(dp,
3127                                     &devip->sdbg_host->dev_info_list,
3128                                     dev_list)
3129                         if (dp->target == sdp->id)
3130                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3131                                         dp->uas_bm);
3132                 break;
3133         default:
3134                 /* do nothing for this command for other mode values */
3135                 break;
3136         }
3137         return 0;
3138 }
3139
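     /* COMPARE AND WRITE: fetch num blocks of compare data plus num blocks
      * of write data from data-out into a temporary buffer, then hand both
      * halves to comp_write_worker(); a miscompare returns MISCOMPARE sense,
      * otherwise the provisioning map is updated. */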
3140 static int
3141 resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3142 {
3143         u8 *cmd = scp->cmnd;
3144         u8 *arr;
3145         u8 *fake_storep_hold;
3146         u64 lba;
3147         u32 dnum;
3148         u32 lb_size = scsi_debug_sector_size;
3149         u8 num;
3150         unsigned long iflags;
3151         int ret;
3152         int retval = 0;
3153
3154         lba = get_unaligned_be64(cmd + 2);
3155         num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
3156         if (0 == num)
3157                 return 0;       /* degenerate case, not an error */
3158         if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3159             (cmd[1] & 0xe0)) {
3160                 mk_sense_invalid_opcode(scp);
3161                 return check_condition_result;
3162         }
3163         if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3164              scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3165             (cmd[1] & 0xe0) == 0)
3166                 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3167                             "to DIF device\n");
3168
3169         /* inline check_device_access_params() */
3170         if (lba + num > sdebug_capacity) {
3171                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3172                 return check_condition_result;
3173         }
3174         /* transfer length excessive (tie in to block limits VPD page) */
3175         if (num > sdebug_store_sectors) {
3176                 /* needs work to find which cdb byte 'num' comes from */
3177                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3178                 return check_condition_result;
3179         }
3180         dnum = 2 * num;
3181         arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
3182         if (NULL == arr) {
3183                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3184                                 INSUFF_RES_ASCQ);
3185                 return check_condition_result;
3186         }
3187
3188         write_lock_irqsave(&atomic_rw, iflags);
3189
3190         /* trick do_device_access() to fetch both compare and write buffers
3191          * from data-in into arr. Safe (atomic) since write_lock held. */
3192         fake_storep_hold = fake_storep;
3193         fake_storep = arr;
3194         ret = do_device_access(scp, 0, dnum, true);
3195         fake_storep = fake_storep_hold;
3196         if (ret == -1) {
3197                 retval = DID_ERROR << 16;
3198                 goto cleanup;
3199         } else if ((ret < (dnum * lb_size)) &&
3200                  (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3201                 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3202                             "indicated=%u, IO sent=%d bytes\n", my_name,
3203                             dnum * lb_size, ret);
3204         if (!comp_write_worker(lba, num, arr)) {
3205                 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3206                 retval = check_condition_result;
3207                 goto cleanup;
3208         }
3209         if (scsi_debug_lbp())
3210                 map_region(lba, num);
3211 cleanup:
3212         write_unlock_irqrestore(&atomic_rw, iflags);
3213         kfree(arr);
3214         return retval;
3215 }
3216
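     /* Layout of one block descriptor in the UNMAP parameter list. */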
3217 struct unmap_block_desc {
3218         __be64  lba;
3219         __be32  blocks;
3220         __be32  __reserved;
3221 };
3222
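     /* UNMAP: validate the parameter list header, then unmap each
      * descriptor's range under the write lock. Without logical block
      * provisioning enabled the command is accepted but does nothing. */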
3223 static int
3224 resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3225 {
3226         unsigned char *buf;
3227         struct unmap_block_desc *desc;
3228         unsigned int i, payload_len, descriptors;
3229         int ret;
3230         unsigned long iflags;
3231
3232
3233         if (!scsi_debug_lbp())
3234                 return 0;       /* fib and say it's done */
3235         payload_len = get_unaligned_be16(scp->cmnd + 7);
3236         BUG_ON(scsi_bufflen(scp) != payload_len);
3237
3238         descriptors = (payload_len - 8) / 16;
3239         if (descriptors > scsi_debug_unmap_max_desc) {
3240                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3241                 return check_condition_result;
3242         }
3243
3244         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3245         if (!buf) {
3246                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3247                                 INSUFF_RES_ASCQ);
3248                 return check_condition_result;
3249         }
3250
3251         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3252
3253         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3254         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3255
3256         desc = (void *)&buf[8];
3257
3258         write_lock_irqsave(&atomic_rw, iflags);
3259
3260         for (i = 0 ; i < descriptors ; i++) {
3261                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3262                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3263
3264                 ret = check_device_access_params(scp, lba, num);
3265                 if (ret)
3266                         goto out;
3267
3268                 unmap_region(lba, num);
3269         }
3270
3271         ret = 0;
3272
3273 out:
3274         write_unlock_irqrestore(&atomic_rw, iflags);
3275         kfree(buf);
3276
3277         return ret;
3278 }
3279
3280 #define SDEBUG_GET_LBA_STATUS_LEN 32
3281
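     /* GET LBA STATUS: return a single descriptor saying whether the
      * starting LBA is mapped and how many following blocks share that
      * state. */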
3282 static int
3283 resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3284 {
3285         u8 *cmd = scp->cmnd;
3286         u64 lba;
3287         u32 alloc_len, mapped, num;
3288         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
3289         int ret;
3290
3291         lba = get_unaligned_be64(cmd + 2);
3292         alloc_len = get_unaligned_be32(cmd + 10);
3293
3294         if (alloc_len < 24)
3295                 return 0;
3296
3297         ret = check_device_access_params(scp, lba, 1);
3298         if (ret)
3299                 return ret;
3300
3301         if (scsi_debug_lbp())
3302                 mapped = map_state(lba, &num);
3303         else {
3304                 mapped = 1;
3305                 /* following just in case virtual_gb changed */
3306                 sdebug_capacity = get_sdebug_capacity();
3307                 if (sdebug_capacity - lba <= 0xffffffff)
3308                         num = sdebug_capacity - lba;
3309                 else
3310                         num = 0xffffffff;
3311         }
3312
3313         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
3314         put_unaligned_be32(20, arr);            /* Parameter Data Length */
3315         put_unaligned_be64(lba, arr + 8);       /* LBA */
3316         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
3317         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
3318
3319         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
3320 }
3321
3322 #define SDEBUG_RLUN_ARR_SZ 256
3323
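     /* REPORT LUNS: build the LUN list for this target, optionally skipping
      * LUN 0 and appending the REPORT LUNS well known LUN, limited by the
      * 256 byte response array. */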
3324 static int resp_report_luns(struct scsi_cmnd *scp,
3325                             struct sdebug_dev_info *devip)
3326 {
3327         unsigned int alloc_len;
3328         int lun_cnt, i, upper, num, n, want_wlun, shortish;
3329         u64 lun;
3330         unsigned char *cmd = scp->cmnd;
3331         int select_report = (int)cmd[2];
3332         struct scsi_lun *one_lun;
3333         unsigned char arr[SDEBUG_RLUN_ARR_SZ];
3334         unsigned char *max_addr;
3335
3336         clear_luns_changed_on_target(devip);
3337         alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
3338         shortish = (alloc_len < 4);
3339         if (shortish || (select_report > 2)) {
3340                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
3341                 return check_condition_result;
3342         }
3343         /* can produce response with up to 16k luns (lun 0 to lun 16383) */
3344         memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
3345         lun_cnt = scsi_debug_max_luns;
3346         if (1 == select_report)
3347                 lun_cnt = 0;
3348         else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
3349                 --lun_cnt;
3350         want_wlun = (select_report > 0) ? 1 : 0;
3351         num = lun_cnt + want_wlun;
3352         arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
3353         arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
3354         n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
3355                             sizeof(struct scsi_lun)), num);
3356         if (n < num) {
3357                 want_wlun = 0;
3358                 lun_cnt = n;
3359         }
3360         one_lun = (struct scsi_lun *) &arr[8];
3361         max_addr = arr + SDEBUG_RLUN_ARR_SZ;
3362         for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
3363              ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
3364              i++, lun++) {
3365                 upper = (lun >> 8) & 0x3f;
3366                 if (upper)
3367                         one_lun[i].scsi_lun[0] =
3368                             (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
3369                 one_lun[i].scsi_lun[1] = lun & 0xff;
3370         }
3371         if (want_wlun) {
3372                 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
3373                 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
3374                 i++;
3375         }
3376         alloc_len = (unsigned char *)(one_lun + i) - arr;
3377         return fill_from_dev_buffer(scp, arr,
3378                                     min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
3379 }
3380
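     /* XOR the write data (copied out of the data-out buffer) into the read
      * data already placed in the bidirectional command's data-in buffer. */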
3381 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
3382                             unsigned int num, struct sdebug_dev_info *devip)
3383 {
3384         int j;
3385         unsigned char *kaddr, *buf;
3386         unsigned int offset;
3387         struct scsi_data_buffer *sdb = scsi_in(scp);
3388         struct sg_mapping_iter miter;
3389
3390         /* it would be better to avoid this temporary buffer */
3391         buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
3392         if (!buf) {
3393                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3394                                 INSUFF_RES_ASCQ);
3395                 return check_condition_result;
3396         }
3397
3398         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3399
3400         offset = 0;
3401         sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents,
3402                         SG_MITER_ATOMIC | SG_MITER_TO_SG);
3403
3404         while (sg_miter_next(&miter)) {
3405                 kaddr = miter.addr;
3406                 for (j = 0; j < miter.length; j++)
3407                         *(kaddr + j) ^= *(buf + offset + j);
3408
3409                 offset += miter.length;
3410         }
3411         sg_miter_stop(&miter);
3412         kfree(buf);
3413
3414         return 0;
3415 }
3416
3417 static int
3418 resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3419 {
3420         u8 *cmd = scp->cmnd;
3421         u64 lba;
3422         u32 num;
3423         int errsts;
3424
3425         if (!scsi_bidi_cmnd(scp)) {
3426                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3427                                 INSUFF_RES_ASCQ);
3428                 return check_condition_result;
3429         }
3430         errsts = resp_read_dt0(scp, devip);
3431         if (errsts)
3432                 return errsts;
3433         if (!(cmd[1] & 0x4)) {          /* DISABLE_WRITE is not set */
3434                 errsts = resp_write_dt0(scp, devip);
3435                 if (errsts)
3436                         return errsts;
3437         }
3438         lba = get_unaligned_be32(cmd + 2);
3439         num = get_unaligned_be16(cmd + 7);
3440         return resp_xdwriteread(scp, lba, num, devip);
3441 }
3442
3443 /* When a timer or tasklet goes off, this function is called. */
3444 static void sdebug_q_cmd_complete(unsigned long indx)
3445 {
3446         int qa_indx;
3447         int retiring = 0;
3448         unsigned long iflags;
3449         struct sdebug_queued_cmd *sqcp;
3450         struct scsi_cmnd *scp;
3451         struct sdebug_dev_info *devip;
3452
3453         atomic_inc(&sdebug_completions);
3454         qa_indx = indx;
3455         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3456                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3457                 return;
3458         }
3459         spin_lock_irqsave(&queued_arr_lock, iflags);
3460         sqcp = &queued_arr[qa_indx];
3461         scp = sqcp->a_cmnd;
3462         if (NULL == scp) {
3463                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3464                 pr_err("%s: scp is NULL\n", __func__);
3465                 return;
3466         }
3467         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3468         if (devip)
3469                 atomic_dec(&devip->num_in_q);
3470         else
3471                 pr_err("%s: devip=NULL\n", __func__);
3472         if (atomic_read(&retired_max_queue) > 0)
3473                 retiring = 1;
3474
3475         sqcp->a_cmnd = NULL;
3476         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3477                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3478                 pr_err("%s: Unexpected completion\n", __func__);
3479                 return;
3480         }
3481
3482         if (unlikely(retiring)) {       /* user has reduced max_queue */
3483                 int k, retval;
3484
3485                 retval = atomic_read(&retired_max_queue);
3486                 if (qa_indx >= retval) {
3487                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3488                         pr_err("%s: index %d too large\n", __func__, retval);
3489                         return;
3490                 }
3491                 k = find_last_bit(queued_in_use_bm, retval);
3492                 if ((k < scsi_debug_max_queue) || (k == retval))
3493                         atomic_set(&retired_max_queue, 0);
3494                 else
3495                         atomic_set(&retired_max_queue, k + 1);
3496         }
3497         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3498         scp->scsi_done(scp); /* callback to mid level */
3499 }
3500
3501 /* When a high resolution timer goes off, this function is called. */
3502 static enum hrtimer_restart
3503 sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
3504 {
3505         int qa_indx;
3506         int retiring = 0;
3507         unsigned long iflags;
3508         struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
3509         struct sdebug_queued_cmd *sqcp;
3510         struct scsi_cmnd *scp;
3511         struct sdebug_dev_info *devip;
3512
3513         atomic_inc(&sdebug_completions);
3514         qa_indx = sd_hrtp->qa_indx;
3515         if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
3516                 pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
3517                 goto the_end;
3518         }
3519         spin_lock_irqsave(&queued_arr_lock, iflags);
3520         sqcp = &queued_arr[qa_indx];
3521         scp = sqcp->a_cmnd;
3522         if (NULL == scp) {
3523                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3524                 pr_err("%s: scp is NULL\n", __func__);
3525                 goto the_end;
3526         }
3527         devip = (struct sdebug_dev_info *)scp->device->hostdata;
3528         if (devip)
3529                 atomic_dec(&devip->num_in_q);
3530         else
3531                 pr_err("%s: devip=NULL\n", __func__);
3532         if (atomic_read(&retired_max_queue) > 0)
3533                 retiring = 1;
3534
3535         sqcp->a_cmnd = NULL;
3536         if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
3537                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3538                 pr_err("%s: Unexpected completion\n", __func__);
3539                 goto the_end;
3540         }
3541
3542         if (unlikely(retiring)) {       /* user has reduced max_queue */
3543                 int k, retval;
3544
3545                 retval = atomic_read(&retired_max_queue);
3546                 if (qa_indx >= retval) {
3547                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3548                         pr_err("%s: index %d too large\n", __func__, retval);
3549                         goto the_end;
3550                 }
3551                 k = find_last_bit(queued_in_use_bm, retval);
3552                 if ((k < scsi_debug_max_queue) || (k == retval))
3553                         atomic_set(&retired_max_queue, 0);
3554                 else
3555                         atomic_set(&retired_max_queue, k + 1);
3556         }
3557         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3558         scp->scsi_done(scp); /* callback to mid level */
3559 the_end:
3560         return HRTIMER_NORESTART;
3561 }
3562
3563 static struct sdebug_dev_info *
3564 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
3565 {
3566         struct sdebug_dev_info *devip;
3567
3568         devip = kzalloc(sizeof(*devip), flags);
3569         if (devip) {
3570                 devip->sdbg_host = sdbg_host;
3571                 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
3572         }
3573         return devip;
3574 }
3575
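     /* Find the sdebug_dev_info for sdev, reusing an unused entry or
      * allocating a new one the first time a device is seen. */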
3576 static struct sdebug_dev_info *devInfoReg(struct scsi_device *sdev)
3577 {
3578         struct sdebug_host_info *sdbg_host;
3579         struct sdebug_dev_info *open_devip = NULL;
3580         struct sdebug_dev_info *devip =
3581                         (struct sdebug_dev_info *)sdev->hostdata;
3582
3583         if (devip)
3584                 return devip;
3585         sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
3586         if (!sdbg_host) {
3587                 pr_err("%s: Host info NULL\n", __func__);
3588                 return NULL;
3589         }
3590         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
3591                 if ((devip->used) && (devip->channel == sdev->channel) &&
3592                     (devip->target == sdev->id) &&
3593                     (devip->lun == sdev->lun))
3594                         return devip;
3595                 else {
3596                         if ((!devip->used) && (!open_devip))
3597                                 open_devip = devip;
3598                 }
3599         }
3600         if (!open_devip) { /* try and make a new one */
3601                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
3602                 if (!open_devip) {
3603                         printk(KERN_ERR "%s: out of memory at line %d\n",
3604                                 __func__, __LINE__);
3605                         return NULL;
3606                 }
3607         }
3608
3609         open_devip->channel = sdev->channel;
3610         open_devip->target = sdev->id;
3611         open_devip->lun = sdev->lun;
3612         open_devip->sdbg_host = sdbg_host;
3613         atomic_set(&open_devip->num_in_q, 0);
3614         set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
3615         open_devip->used = true;
3616         return open_devip;
3617 }
3618
3619 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
3620 {
3621         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3622                 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
3623                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3624         queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
3625         return 0;
3626 }
3627
3628 static int scsi_debug_slave_configure(struct scsi_device *sdp)
3629 {
3630         struct sdebug_dev_info *devip;
3631
3632         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3633                 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
3634                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3635         if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
3636                 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
3637         devip = devInfoReg(sdp);
3638         if (NULL == devip)
3639                 return 1;       /* no resources, will be marked offline */
3640         sdp->hostdata = devip;
3641         blk_queue_max_segment_size(sdp->request_queue, -1U);
3642         if (scsi_debug_no_uld)
3643                 sdp->no_uld_attach = 1;
3644         return 0;
3645 }
3646
3647 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
3648 {
3649         struct sdebug_dev_info *devip =
3650                 (struct sdebug_dev_info *)sdp->hostdata;
3651
3652         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3653                 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
3654                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
3655         if (devip) {
3656                 /* make this slot available for re-use */
3657                 devip->used = false;
3658                 sdp->hostdata = NULL;
3659         }
3660 }
3661
3662 /* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
3663 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
3664 {
3665         unsigned long iflags;
3666         int k, qmax, r_qmax;
3667         struct sdebug_queued_cmd *sqcp;
3668         struct sdebug_dev_info *devip;
3669
3670         spin_lock_irqsave(&queued_arr_lock, iflags);
3671         qmax = scsi_debug_max_queue;
3672         r_qmax = atomic_read(&retired_max_queue);
3673         if (r_qmax > qmax)
3674                 qmax = r_qmax;
3675         for (k = 0; k < qmax; ++k) {
3676                 if (test_bit(k, queued_in_use_bm)) {
3677                         sqcp = &queued_arr[k];
3678                         if (cmnd == sqcp->a_cmnd) {
3679                                 devip = (struct sdebug_dev_info *)
3680                                         cmnd->device->hostdata;
3681                                 if (devip)
3682                                         atomic_dec(&devip->num_in_q);
3683                                 sqcp->a_cmnd = NULL;
3684                                 spin_unlock_irqrestore(&queued_arr_lock,
3685                                                        iflags);
3686                                 if (scsi_debug_ndelay > 0) {
3687                                         if (sqcp->sd_hrtp)
3688                                                 hrtimer_cancel(
3689                                                         &sqcp->sd_hrtp->hrt);
3690                                 } else if (scsi_debug_delay > 0) {
3691                                         if (sqcp->cmnd_timerp)
3692                                                 del_timer_sync(
3693                                                         sqcp->cmnd_timerp);
3694                                 } else if (scsi_debug_delay < 0) {
3695                                         if (sqcp->tletp)
3696                                                 tasklet_kill(sqcp->tletp);
3697                                 }
3698                                 clear_bit(k, queued_in_use_bm);
3699                                 return 1;
3700                         }
3701                 }
3702         }
3703         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3704         return 0;
3705 }
3706
3707 /* Deletes (stops) timers or tasklets of all queued commands */
3708 static void stop_all_queued(void)
3709 {
3710         unsigned long iflags;
3711         int k;
3712         struct sdebug_queued_cmd *sqcp;
3713         struct sdebug_dev_info *devip;
3714
3715         spin_lock_irqsave(&queued_arr_lock, iflags);
3716         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3717                 if (test_bit(k, queued_in_use_bm)) {
3718                         sqcp = &queued_arr[k];
3719                         if (sqcp->a_cmnd) {
3720                                 devip = (struct sdebug_dev_info *)
3721                                         sqcp->a_cmnd->device->hostdata;
3722                                 if (devip)
3723                                         atomic_dec(&devip->num_in_q);
3724                                 sqcp->a_cmnd = NULL;
3725                                 spin_unlock_irqrestore(&queued_arr_lock,
3726                                                        iflags);
3727                                 if (scsi_debug_ndelay > 0) {
3728                                         if (sqcp->sd_hrtp)
3729                                                 hrtimer_cancel(
3730                                                         &sqcp->sd_hrtp->hrt);
3731                                 } else if (scsi_debug_delay > 0) {
3732                                         if (sqcp->cmnd_timerp)
3733                                                 del_timer_sync(
3734                                                         sqcp->cmnd_timerp);
3735                                 } else if (scsi_debug_delay < 0) {
3736                                         if (sqcp->tletp)
3737                                                 tasklet_kill(sqcp->tletp);
3738                                 }
3739                                 clear_bit(k, queued_in_use_bm);
3740                                 spin_lock_irqsave(&queued_arr_lock, iflags);
3741                         }
3742                 }
3743         }
3744         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3745 }
3746
3747 /* Free queued command memory on heap */
3748 static void free_all_queued(void)
3749 {
3750         unsigned long iflags;
3751         int k;
3752         struct sdebug_queued_cmd *sqcp;
3753
3754         spin_lock_irqsave(&queued_arr_lock, iflags);
3755         for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
3756                 sqcp = &queued_arr[k];
3757                 kfree(sqcp->cmnd_timerp);
3758                 sqcp->cmnd_timerp = NULL;
3759                 kfree(sqcp->tletp);
3760                 sqcp->tletp = NULL;
3761                 kfree(sqcp->sd_hrtp);
3762                 sqcp->sd_hrtp = NULL;
3763         }
3764         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3765 }
3766
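     /* Error handling callbacks. The abort handler stops the command's timer
      * or tasklet; the reset handlers post unit attention conditions on
      * every device within the scope of the reset. */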
3767 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
3768 {
3769         ++num_aborts;
3770         if (SCpnt) {
3771                 if (SCpnt->device &&
3772                     (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3773                         sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
3774                                     __func__);
3775                 stop_queued_cmnd(SCpnt);
3776         }
3777         return SUCCESS;
3778 }
3779
3780 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
3781 {
3782         struct sdebug_dev_info *devip;
3783
3784         ++num_dev_resets;
3785         if (SCpnt && SCpnt->device) {
3786                 struct scsi_device *sdp = SCpnt->device;
3787
3788                 if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3789                         sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3790                 devip = devInfoReg(sdp);
3791                 if (devip)
3792                         set_bit(SDEBUG_UA_POR, devip->uas_bm);
3793         }
3794         return SUCCESS;
3795 }
3796
3797 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
3798 {
3799         struct sdebug_host_info *sdbg_host;
3800         struct sdebug_dev_info *devip;
3801         struct scsi_device *sdp;
3802         struct Scsi_Host *hp;
3803         int k = 0;
3804
3805         ++num_target_resets;
3806         if (!SCpnt)
3807                 goto lie;
3808         sdp = SCpnt->device;
3809         if (!sdp)
3810                 goto lie;
3811         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3812                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3813         hp = sdp->host;
3814         if (!hp)
3815                 goto lie;
3816         sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3817         if (sdbg_host) {
3818                 list_for_each_entry(devip,
3819                                     &sdbg_host->dev_info_list,
3820                                     dev_list)
3821                         if (devip->target == sdp->id) {
3822                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3823                                 ++k;
3824                         }
3825         }
3826         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3827                 sdev_printk(KERN_INFO, sdp,
3828                             "%s: %d device(s) found in target\n", __func__, k);
3829 lie:
3830         return SUCCESS;
3831 }
3832
3833 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
3834 {
3835         struct sdebug_host_info *sdbg_host;
3836         struct sdebug_dev_info *devip;
3837         struct scsi_device *sdp;
3838         struct Scsi_Host *hp;
3839         int k = 0;
3840
3841         ++num_bus_resets;
3842         if (!(SCpnt && SCpnt->device))
3843                 goto lie;
3844         sdp = SCpnt->device;
3845         if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
3846                 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
3847         hp = sdp->host;
3848         if (hp) {
3849                 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
3850                 if (sdbg_host) {
3851                         list_for_each_entry(devip,
3852                                             &sdbg_host->dev_info_list,
3853                                             dev_list) {
3854                                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3855                                 ++k;
3856                         }
3857                 }
3858         }
3859         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3860                 sdev_printk(KERN_INFO, sdp,
3861                             "%s: %d device(s) found in host\n", __func__, k);
3862 lie:
3863         return SUCCESS;
3864 }
3865
3866 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
3867 {
3868         struct sdebug_host_info *sdbg_host;
3869         struct sdebug_dev_info *devip;
3870         int k = 0;
3871
3872         ++num_host_resets;
3873         if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
3874                 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
3875         spin_lock(&sdebug_host_list_lock);
3876         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
3877                 list_for_each_entry(devip, &sdbg_host->dev_info_list,
3878                                     dev_list) {
3879                         set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3880                         ++k;
3881                 }
3882         }
3883         spin_unlock(&sdebug_host_list_lock);
3884         stop_all_queued();
3885         if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
3886                 sdev_printk(KERN_INFO, SCpnt->device,
3887                             "%s: %d device(s) found\n", __func__, k);
3888         return SUCCESS;
3889 }
3890
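/*
 * Write a minimal MBR-style partition table at the start of the ramdisk:
 * the 0x55 0xAA signature at offsets 510/511 and up to SDEBUG_MAX_PARTS
 * primary entries (type 0x83, plain Linux) at offset 0x1be, with CHS
 * values derived from the simulated sdebug_heads/sdebug_sectors_per
 * geometry. Nothing is written for stores smaller than 1 MiB.
 */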
3891 static void __init sdebug_build_parts(unsigned char *ramp,
3892                                       unsigned long store_size)
3893 {
3894         struct partition * pp;
3895         int starts[SDEBUG_MAX_PARTS + 2];
3896         int sectors_per_part, num_sectors, k;
3897         int heads_by_sects, start_sec, end_sec;
3898
3899         /* assume partition table already zeroed */
3900         if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
3901                 return;
3902         if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
3903                 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
3904                 pr_warn("%s: reducing partitions to %d\n", __func__,
3905                         SDEBUG_MAX_PARTS);
3906         }
3907         num_sectors = (int)sdebug_store_sectors;
3908         sectors_per_part = (num_sectors - sdebug_sectors_per)
3909                            / scsi_debug_num_parts;
3910         heads_by_sects = sdebug_heads * sdebug_sectors_per;
3911         starts[0] = sdebug_sectors_per;
3912         for (k = 1; k < scsi_debug_num_parts; ++k)
3913                 starts[k] = ((k * sectors_per_part) / heads_by_sects)
3914                             * heads_by_sects;
3915         starts[scsi_debug_num_parts] = num_sectors;
3916         starts[scsi_debug_num_parts + 1] = 0;
3917
3918         ramp[510] = 0x55;       /* magic partition markings */
3919         ramp[511] = 0xAA;
3920         pp = (struct partition *)(ramp + 0x1be);
3921         for (k = 0; starts[k + 1]; ++k, ++pp) {
3922                 start_sec = starts[k];
3923                 end_sec = starts[k + 1] - 1;
3924                 pp->boot_ind = 0;
3925
3926                 pp->cyl = start_sec / heads_by_sects;
3927                 pp->head = (start_sec - (pp->cyl * heads_by_sects))
3928                            / sdebug_sectors_per;
3929                 pp->sector = (start_sec % sdebug_sectors_per) + 1;
3930
3931                 pp->end_cyl = end_sec / heads_by_sects;
3932                 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
3933                                / sdebug_sectors_per;
3934                 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
3935
3936                 pp->start_sect = cpu_to_le32(start_sec);
3937                 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
3938                 pp->sys_ind = 0x83;     /* plain Linux partition */
3939         }
3940 }
3941
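/*
 * Complete a command either immediately in the caller's context (when
 * delta_jiff is 0) or later via a queued_arr entry: a positive delta_jiff
 * arms a jiffies-based timer, otherwise a positive scsi_debug_ndelay uses
 * an hrtimer for nanosecond-scale delays, and any remaining negative
 * delay schedules a tasklet. TASK SET FULL or "host busy" results are
 * substituted here when the queue or per-device depth limits are hit.
 */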
3942 static int
3943 schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3944               int scsi_result, int delta_jiff)
3945 {
3946         unsigned long iflags;
3947         int k, num_in_q, qdepth, inject;
3948         struct sdebug_queued_cmd *sqcp = NULL;
3949         struct scsi_device *sdp;
3950
3951         if (NULL == cmnd || NULL == devip) {
3952                 pr_warn("%s: called with NULL cmnd or devip pointer\n",
3953                         __func__);
3954                 /* no particularly good error to report back */
3955                 return SCSI_MLQUEUE_HOST_BUSY;
3956         }
             sdp = cmnd->device;
3957         if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3958                 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3959                             __func__, scsi_result);
3960         if (delta_jiff == 0)
3961                 goto respond_in_thread;
3962
3963         /* schedule the response at a later time if resources permit */
3964         spin_lock_irqsave(&queued_arr_lock, iflags);
3965         num_in_q = atomic_read(&devip->num_in_q);
3966         qdepth = cmnd->device->queue_depth;
3967         inject = 0;
3968         if ((qdepth > 0) && (num_in_q >= qdepth)) {
3969                 if (scsi_result) {
3970                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
3971                         goto respond_in_thread;
3972                 } else
3973                         scsi_result = device_qfull_result;
3974         } else if ((scsi_debug_every_nth != 0) &&
3975                    (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3976                    (scsi_result == 0)) {
3977                 if ((num_in_q == (qdepth - 1)) &&
3978                     (atomic_inc_return(&sdebug_a_tsf) >=
3979                      abs(scsi_debug_every_nth))) {
3980                         atomic_set(&sdebug_a_tsf, 0);
3981                         inject = 1;
3982                         scsi_result = device_qfull_result;
3983                 }
3984         }
3985
3986         k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3987         if (k >= scsi_debug_max_queue) {
3988                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3989                 if (scsi_result)
3990                         goto respond_in_thread;
3991                 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3992                         scsi_result = device_qfull_result;
3993                 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3994                         sdev_printk(KERN_INFO, sdp,
3995                                     "%s: max_queue=%d exceeded, %s\n",
3996                                     __func__, scsi_debug_max_queue,
3997                                     (scsi_result ?  "status: TASK SET FULL" :
3998                                                     "report: host busy"));
3999                 if (scsi_result)
4000                         goto respond_in_thread;
4001                 else
4002                         return SCSI_MLQUEUE_HOST_BUSY;
4003         }
4004         __set_bit(k, queued_in_use_bm);
4005         atomic_inc(&devip->num_in_q);
4006         sqcp = &queued_arr[k];
4007         sqcp->a_cmnd = cmnd;
4008         cmnd->result = scsi_result;
4009         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4010         if (delta_jiff > 0) {
4011                 if (NULL == sqcp->cmnd_timerp) {
4012                         sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
4013                                                     GFP_ATOMIC);
4014                         if (NULL == sqcp->cmnd_timerp)
4015                                 return SCSI_MLQUEUE_HOST_BUSY;
4016                         init_timer(sqcp->cmnd_timerp);
4017                 }
4018                 sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
4019                 sqcp->cmnd_timerp->data = k;
4020                 sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
4021                 add_timer(sqcp->cmnd_timerp);
4022         } else if (scsi_debug_ndelay > 0) {
4023                 ktime_t kt = ktime_set(0, scsi_debug_ndelay);
4024                 struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
4025
4026                 if (NULL == sd_hp) {
4027                         sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
4028                         if (NULL == sd_hp)
4029                                 return SCSI_MLQUEUE_HOST_BUSY;
4030                         sqcp->sd_hrtp = sd_hp;
4031                         hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
4032                                      HRTIMER_MODE_REL);
4033                         sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
4034                         sd_hp->qa_indx = k;
4035                 }
4036                 hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
4037         } else {        /* delay < 0 */
4038                 if (NULL == sqcp->tletp) {
4039                         sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
4040                                               GFP_ATOMIC);
4041                         if (NULL == sqcp->tletp)
4042                                 return SCSI_MLQUEUE_HOST_BUSY;
4043                         tasklet_init(sqcp->tletp,
4044                                      sdebug_q_cmd_complete, k);
4045                 }
4046                 if (-1 == delta_jiff)
4047                         tasklet_hi_schedule(sqcp->tletp);
4048                 else
4049                         tasklet_schedule(sqcp->tletp);
4050         }
4051         if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
4052             (scsi_result == device_qfull_result))
4053                 sdev_printk(KERN_INFO, sdp,
4054                             "%s: num_in_q=%d +1, %s%s\n", __func__,
4055                             num_in_q, (inject ? "<inject> " : ""),
4056                             "status: TASK SET FULL");
4057         return 0;
4058
4059 respond_in_thread:      /* call back to mid-layer using invocation thread */
4060         cmnd->result = scsi_result;
4061         cmnd->scsi_done(cmnd);
4062         return 0;
4063 }
4064
4065 /* Note: The following macros create attribute files in the
4066    /sys/module/scsi_debug/parameters directory. Unfortunately this
4067    driver is not notified when one of those files changes, so it cannot
4068    trigger auxiliary actions as it can when the corresponding attribute
4069    in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
4070  */
4071 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
4072 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
4073 module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
4074 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
4075 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
4076 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
4077 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
4078 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
4079 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
4080 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
4081 module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
4082 module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
4083 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
4084 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
4085 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
4086 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
4087 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
4088 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
4089 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
4090 module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
4091 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
4092 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
4093 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
4094 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
4095 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
4096 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
4097 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
4098 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
4099 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
4100 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
4101 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
4102 module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
4103 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
4104 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
4105 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
4106 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
4107 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
4108 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
4109                    S_IRUGO | S_IWUSR);
4110 module_param_named(write_same_length, scsi_debug_write_same_length, int,
4111                    S_IRUGO | S_IWUSR);
4112
4113 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
4114 MODULE_DESCRIPTION("SCSI debug adapter driver");
4115 MODULE_LICENSE("GPL");
4116 MODULE_VERSION(SCSI_DEBUG_VERSION);
4117
4118 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
4119 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
4120 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
4121 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
4122 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
4123 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
4124 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
4125 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
4126 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
4127 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
4128 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
4129 MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
4130 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
4131 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
4132 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
4133 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
4134 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
4135 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
4136 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
4137 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
4138 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
4139 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
4140 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
4141 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
4142 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
4143 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
4144 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
4145 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
4146 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
4147 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
4148 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
4149 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
4150 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
4151 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
4152 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
4153 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
4154 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
4155 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
4156 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
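/*
 * Illustrative usage only (the parameter values below are arbitrary
 * examples, not recommendations): the parameters above can be supplied at
 * load time, e.g.
 *      modprobe scsi_debug dev_size_mb=256 num_parts=1 every_nth=100
 * and those declared with S_IWUSR can later be changed through files such
 * as /sys/module/scsi_debug/parameters/every_nth, subject to the
 * limitation noted before the module_param_named() block above.
 */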
4157
4158 static char sdebug_info[256];
4159
4160 static const char * scsi_debug_info(struct Scsi_Host * shp)
4161 {
4162         sprintf(sdebug_info, "scsi_debug, version %s [%s], "
4163                 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
4164                 scsi_debug_version_date, scsi_debug_dev_size_mb,
4165                 scsi_debug_opts);
4166         return sdebug_info;
4167 }
4168
4169 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
4170 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
4171 {
4172         char arr[16];
4173         int opts;
4174         int minLen = length > 15 ? 15 : length;
4175
4176         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
4177                 return -EACCES;
4178         memcpy(arr, buffer, minLen);
4179         arr[minLen] = '\0';
4180         if (1 != sscanf(arr, "%d", &opts))
4181                 return -EINVAL;
4182         scsi_debug_opts = opts;
4183         if (scsi_debug_every_nth != 0)
4184                 atomic_set(&sdebug_cmnd_count, 0);
4185         return length;
4186 }
4187
4188 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
4189  * same for each scsi_debug host (if more than one). Some of the counters
4190  * shown are not atomic so they may be inaccurate on a busy system. */
4191 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
4192 {
4193         int f, l;
4194         char b[32];
4195
4196         if (scsi_debug_every_nth > 0)
4197                 snprintf(b, sizeof(b), " (curr:%d)",
4198                          ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
4199                                 atomic_read(&sdebug_a_tsf) :
4200                                 atomic_read(&sdebug_cmnd_count)));
4201         else
4202                 b[0] = '\0';
4203
4204         seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
4205                 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
4206                 "every_nth=%d%s\n"
4207                 "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
4208                 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
4209                 "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
4210                 "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
4211                 "usec_in_jiffy=%lu\n",
4212                 SCSI_DEBUG_VERSION, scsi_debug_version_date,
4213                 scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
4214                 scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
4215                 scsi_debug_max_luns, atomic_read(&sdebug_completions),
4216                 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
4217                 sdebug_sectors_per, num_aborts, num_dev_resets,
4218                 num_target_resets, num_bus_resets, num_host_resets,
4219                 dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
4220
4221         f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
4222         if (f != scsi_debug_max_queue) {
4223                 l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
4224                 seq_printf(m, "   %s BUSY: first,last bits set: %d,%d\n",
4225                            "queued_in_use_bm", f, l);
4226         }
4227         return 0;
4228 }
4229
4230 static ssize_t delay_show(struct device_driver *ddp, char *buf)
4231 {
4232         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
4233 }
4234 /* Returns -EBUSY if delay is being changed and commands are queued */
4235 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
4236                            size_t count)
4237 {
4238         int delay, res;
4239
4240         if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
4241                 res = count;
4242                 if (scsi_debug_delay != delay) {
4243                         unsigned long iflags;
4244                         int k;
4245
4246                         spin_lock_irqsave(&queued_arr_lock, iflags);
4247                         k = find_first_bit(queued_in_use_bm,
4248                                            scsi_debug_max_queue);
4249                         if (k != scsi_debug_max_queue)
4250                                 res = -EBUSY;   /* have queued commands */
4251                         else {
4252                                 scsi_debug_delay = delay;
4253                                 scsi_debug_ndelay = 0;
4254                         }
4255                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4256                 }
4257                 return res;
4258         }
4259         return -EINVAL;
4260 }
4261 static DRIVER_ATTR_RW(delay);
4262
4263 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
4264 {
4265         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
4266 }
4267 /* Returns -EBUSY if ndelay is being changed and commands are queued */
4268 /* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
4269 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
4270                            size_t count)
4271 {
4272         unsigned long iflags;
4273         int ndelay, res, k;
4274
4275         if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
4276             (ndelay >= 0) && (ndelay < 1000000000)) {
4277                 res = count;
4278                 if (scsi_debug_ndelay != ndelay) {
4279                         spin_lock_irqsave(&queued_arr_lock, iflags);
4280                         k = find_first_bit(queued_in_use_bm,
4281                                            scsi_debug_max_queue);
4282                         if (k != scsi_debug_max_queue)
4283                                 res = -EBUSY;   /* have queued commands */
4284                         else {
4285                                 scsi_debug_ndelay = ndelay;
4286                                 scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
4287                                                           : DEF_DELAY;
4288                         }
4289                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4290                 }
4291                 return res;
4292         }
4293         return -EINVAL;
4294 }
4295 static DRIVER_ATTR_RW(ndelay);
4296
4297 static ssize_t opts_show(struct device_driver *ddp, char *buf)
4298 {
4299         return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
4300 }
4301
4302 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
4303                           size_t count)
4304 {
4305         int opts;
4306         char work[20];
4307
4308         if (1 == sscanf(buf, "%10s", work)) {
4309                 if (0 == strncasecmp(work, "0x", 2)) {
4310                         if (1 == sscanf(&work[2], "%x", &opts))
4311                                 goto opts_done;
4312                 } else {
4313                         if (1 == sscanf(work, "%d", &opts))
4314                                 goto opts_done;
4315                 }
4316         }
4317         return -EINVAL;
4318 opts_done:
4319         scsi_debug_opts = opts;
4320         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
4321                 sdebug_any_injecting_opt = true;
4322         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
4323                 sdebug_any_injecting_opt = true;
4324         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
4325                 sdebug_any_injecting_opt = true;
4326         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
4327                 sdebug_any_injecting_opt = true;
4328         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
4329                 sdebug_any_injecting_opt = true;
4330         atomic_set(&sdebug_cmnd_count, 0);
4331         atomic_set(&sdebug_a_tsf, 0);
4332         return count;
4333 }
4334 static DRIVER_ATTR_RW(opts);
4335
4336 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
4337 {
4338         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
4339 }
4340 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
4341                            size_t count)
4342 {
4343         int n;
4344
4345         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4346                 scsi_debug_ptype = n;
4347                 return count;
4348         }
4349         return -EINVAL;
4350 }
4351 static DRIVER_ATTR_RW(ptype);
4352
4353 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
4354 {
4355         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
4356 }
4357 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
4358                             size_t count)
4359 {
4360         int n;
4361
4362         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4363                 scsi_debug_dsense = n;
4364                 return count;
4365         }
4366         return -EINVAL;
4367 }
4368 static DRIVER_ATTR_RW(dsense);
4369
4370 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
4371 {
4372         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
4373 }
4374 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
4375                              size_t count)
4376 {
4377         int n;
4378
4379         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4380                 n = (n > 0);
4381                 scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
4382                 if (scsi_debug_fake_rw != n) {
4383                         if ((0 == n) && (NULL == fake_storep)) {
4384                                 unsigned long sz =
4385                                         (unsigned long)scsi_debug_dev_size_mb *
4386                                         1048576;
4387
4388                                 fake_storep = vmalloc(sz);
4389                                 if (NULL == fake_storep) {
4390                                         pr_err("%s: out of memory, 9\n",
4391                                                __func__);
4392                                         return -ENOMEM;
4393                                 }
4394                                 memset(fake_storep, 0, sz);
4395                         }
4396                         scsi_debug_fake_rw = n;
4397                 }
4398                 return count;
4399         }
4400         return -EINVAL;
4401 }
4402 static DRIVER_ATTR_RW(fake_rw);
4403
4404 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
4405 {
4406         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
4407 }
4408 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
4409                               size_t count)
4410 {
4411         int n;
4412
4413         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4414                 scsi_debug_no_lun_0 = n;
4415                 return count;
4416         }
4417         return -EINVAL;
4418 }
4419 static DRIVER_ATTR_RW(no_lun_0);
4420
4421 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
4422 {
4423         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
4424 }
4425 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
4426                               size_t count)
4427 {
4428         int n;
4429
4430         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4431                 scsi_debug_num_tgts = n;
4432                 sdebug_max_tgts_luns();
4433                 return count;
4434         }
4435         return -EINVAL;
4436 }
4437 static DRIVER_ATTR_RW(num_tgts);
4438
4439 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
4440 {
4441         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
4442 }
4443 static DRIVER_ATTR_RO(dev_size_mb);
4444
4445 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
4446 {
4447         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
4448 }
4449 static DRIVER_ATTR_RO(num_parts);
4450
4451 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
4452 {
4453         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
4454 }
4455 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
4456                                size_t count)
4457 {
4458         int nth;
4459
4460         if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
4461                 scsi_debug_every_nth = nth;
4462                 atomic_set(&sdebug_cmnd_count, 0);
4463                 return count;
4464         }
4465         return -EINVAL;
4466 }
4467 static DRIVER_ATTR_RW(every_nth);
4468
4469 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
4470 {
4471         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
4472 }
4473 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
4474                               size_t count)
4475 {
4476         int n;
4477         bool changed;
4478
4479         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4480                 changed = (scsi_debug_max_luns != n);
4481                 scsi_debug_max_luns = n;
4482                 sdebug_max_tgts_luns();
4483                 if (changed && (scsi_debug_scsi_level >= 5)) {  /* >= SPC-3 */
4484                         struct sdebug_host_info *sdhp;
4485                         struct sdebug_dev_info *dp;
4486
4487                         spin_lock(&sdebug_host_list_lock);
4488                         list_for_each_entry(sdhp, &sdebug_host_list,
4489                                             host_list) {
4490                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4491                                                     dev_list) {
4492                                         set_bit(SDEBUG_UA_LUNS_CHANGED,
4493                                                 dp->uas_bm);
4494                                 }
4495                         }
4496                         spin_unlock(&sdebug_host_list_lock);
4497                 }
4498                 return count;
4499         }
4500         return -EINVAL;
4501 }
4502 static DRIVER_ATTR_RW(max_luns);
4503
4504 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
4505 {
4506         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
4507 }
4508 /* N.B. max_queue can be changed while there are queued commands. In flight
4509  * commands beyond the new max_queue will be completed. */
4510 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
4511                                size_t count)
4512 {
4513         unsigned long iflags;
4514         int n, k;
4515
4516         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
4517             (n <= SCSI_DEBUG_CANQUEUE)) {
4518                 spin_lock_irqsave(&queued_arr_lock, iflags);
4519                 k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
4520                 scsi_debug_max_queue = n;
4521                 if (SCSI_DEBUG_CANQUEUE == k)
4522                         atomic_set(&retired_max_queue, 0);
4523                 else if (k >= n)
4524                         atomic_set(&retired_max_queue, k + 1);
4525                 else
4526                         atomic_set(&retired_max_queue, 0);
4527                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
4528                 return count;
4529         }
4530         return -EINVAL;
4531 }
4532 static DRIVER_ATTR_RW(max_queue);
4533
4534 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
4535 {
4536         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
4537 }
4538 static DRIVER_ATTR_RO(no_uld);
4539
4540 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
4541 {
4542         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
4543 }
4544 static DRIVER_ATTR_RO(scsi_level);
4545
4546 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
4547 {
4548         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
4549 }
4550 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
4551                                 size_t count)
4552 {
4553         int n;
4554         bool changed;
4555
4556         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4557                 changed = (scsi_debug_virtual_gb != n);
4558                 scsi_debug_virtual_gb = n;
4559                 sdebug_capacity = get_sdebug_capacity();
4560                 if (changed) {
4561                         struct sdebug_host_info *sdhp;
4562                         struct sdebug_dev_info *dp;
4563
4564                         spin_lock(&sdebug_host_list_lock);
4565                         list_for_each_entry(sdhp, &sdebug_host_list,
4566                                             host_list) {
4567                                 list_for_each_entry(dp, &sdhp->dev_info_list,
4568                                                     dev_list) {
4569                                         set_bit(SDEBUG_UA_CAPACITY_CHANGED,
4570                                                 dp->uas_bm);
4571                                 }
4572                         }
4573                         spin_unlock(&sdebug_host_list_lock);
4574                 }
4575                 return count;
4576         }
4577         return -EINVAL;
4578 }
4579 static DRIVER_ATTR_RW(virtual_gb);
4580
4581 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
4582 {
4583         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
4584 }
4585
4586 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
4587                               size_t count)
4588 {
4589         int delta_hosts;
4590
4591         if (sscanf(buf, "%d", &delta_hosts) != 1)
4592                 return -EINVAL;
4593         if (delta_hosts > 0) {
4594                 do {
4595                         sdebug_add_adapter();
4596                 } while (--delta_hosts);
4597         } else if (delta_hosts < 0) {
4598                 do {
4599                         sdebug_remove_adapter();
4600                 } while (++delta_hosts);
4601         }
4602         return count;
4603 }
4604 static DRIVER_ATTR_RW(add_host);
4605
4606 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
4607 {
4608         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
4609 }
4610 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
4611                                     size_t count)
4612 {
4613         int n;
4614
4615         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4616                 scsi_debug_vpd_use_hostno = n;
4617                 return count;
4618         }
4619         return -EINVAL;
4620 }
4621 static DRIVER_ATTR_RW(vpd_use_hostno);
4622
4623 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
4624 {
4625         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
4626 }
4627 static DRIVER_ATTR_RO(sector_size);
4628
4629 static ssize_t dix_show(struct device_driver *ddp, char *buf)
4630 {
4631         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
4632 }
4633 static DRIVER_ATTR_RO(dix);
4634
4635 static ssize_t dif_show(struct device_driver *ddp, char *buf)
4636 {
4637         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
4638 }
4639 static DRIVER_ATTR_RO(dif);
4640
4641 static ssize_t guard_show(struct device_driver *ddp, char *buf)
4642 {
4643         return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
4644 }
4645 static DRIVER_ATTR_RO(guard);
4646
4647 static ssize_t ato_show(struct device_driver *ddp, char *buf)
4648 {
4649         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
4650 }
4651 static DRIVER_ATTR_RO(ato);
4652
4653 static ssize_t map_show(struct device_driver *ddp, char *buf)
4654 {
4655         ssize_t count;
4656
4657         if (!scsi_debug_lbp())
4658                 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
4659                                  sdebug_store_sectors);
4660
4661         count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
4662                           (int)map_size, map_storep);
4663         buf[count++] = '\n';
4664         buf[count] = '\0';
4665
4666         return count;
4667 }
4668 static DRIVER_ATTR_RO(map);
4669
4670 static ssize_t removable_show(struct device_driver *ddp, char *buf)
4671 {
4672         return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
4673 }
4674 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
4675                                size_t count)
4676 {
4677         int n;
4678
4679         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4680                 scsi_debug_removable = (n > 0);
4681                 return count;
4682         }
4683         return -EINVAL;
4684 }
4685 static DRIVER_ATTR_RW(removable);
4686
4687 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
4688 {
4689         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
4690 }
4691 /* Returns -EBUSY if host_lock is being changed and commands are queued */
4692 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
4693                                size_t count)
4694 {
4695         int n, res;
4696
4697         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4698                 bool new_host_lock = (n > 0);
4699
4700                 res = count;
4701                 if (new_host_lock != scsi_debug_host_lock) {
4702                         unsigned long iflags;
4703                         int k;
4704
4705                         spin_lock_irqsave(&queued_arr_lock, iflags);
4706                         k = find_first_bit(queued_in_use_bm,
4707                                            scsi_debug_max_queue);
4708                         if (k != scsi_debug_max_queue)
4709                                 res = -EBUSY;   /* have queued commands */
4710                         else
4711                                 scsi_debug_host_lock = new_host_lock;
4712                         spin_unlock_irqrestore(&queued_arr_lock, iflags);
4713                 }
4714                 return res;
4715         }
4716         return -EINVAL;
4717 }
4718 static DRIVER_ATTR_RW(host_lock);
4719
4720 static ssize_t strict_show(struct device_driver *ddp, char *buf)
4721 {
4722         return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
4723 }
4724 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
4725                             size_t count)
4726 {
4727         int n;
4728
4729         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
4730                 scsi_debug_strict = (n > 0);
4731                 return count;
4732         }
4733         return -EINVAL;
4734 }
4735 static DRIVER_ATTR_RW(strict);
4736
4737
4738 /* Note: The following array creates attribute files in the
4739    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
4740    files (over those found in the /sys/module/scsi_debug/parameters
4741    directory) is that auxiliary actions can be triggered when an attribute
4742    is changed. For example see: add_host_store() above.
4743  */
4744
4745 static struct attribute *sdebug_drv_attrs[] = {
4746         &driver_attr_delay.attr,
4747         &driver_attr_opts.attr,
4748         &driver_attr_ptype.attr,
4749         &driver_attr_dsense.attr,
4750         &driver_attr_fake_rw.attr,
4751         &driver_attr_no_lun_0.attr,
4752         &driver_attr_num_tgts.attr,
4753         &driver_attr_dev_size_mb.attr,
4754         &driver_attr_num_parts.attr,
4755         &driver_attr_every_nth.attr,
4756         &driver_attr_max_luns.attr,
4757         &driver_attr_max_queue.attr,
4758         &driver_attr_no_uld.attr,
4759         &driver_attr_scsi_level.attr,
4760         &driver_attr_virtual_gb.attr,
4761         &driver_attr_add_host.attr,
4762         &driver_attr_vpd_use_hostno.attr,
4763         &driver_attr_sector_size.attr,
4764         &driver_attr_dix.attr,
4765         &driver_attr_dif.attr,
4766         &driver_attr_guard.attr,
4767         &driver_attr_ato.attr,
4768         &driver_attr_map.attr,
4769         &driver_attr_removable.attr,
4770         &driver_attr_host_lock.attr,
4771         &driver_attr_ndelay.attr,
4772         &driver_attr_strict.attr,
4773         NULL,
4774 };
4775 ATTRIBUTE_GROUPS(sdebug_drv);
4776
4777 static struct device *pseudo_primary;
4778
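/*
 * Module/boot-time initialisation: validate the easily mis-set parameters
 * (sector_size, dif, guard, ato, physblk_exp, lowest_aligned, ndelay),
 * derive the simulated capacity and CHS geometry, allocate the optional
 * ramdisk, DIF and provisioning-map stores, register the pseudo root
 * device, bus and driver, and finally add scsi_debug_add_host adapters.
 */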
4779 static int __init scsi_debug_init(void)
4780 {
4781         unsigned long sz;
4782         int host_to_add;
4783         int k;
4784         int ret;
4785
4786         atomic_set(&sdebug_cmnd_count, 0);
4787         atomic_set(&sdebug_completions, 0);
4788         atomic_set(&retired_max_queue, 0);
4789
4790         if (scsi_debug_ndelay >= 1000000000) {
4791                 pr_warn("%s: ndelay must be less than 1 second, ignored\n",
4792                         __func__);
4793                 scsi_debug_ndelay = 0;
4794         } else if (scsi_debug_ndelay > 0)
4795                 scsi_debug_delay = DELAY_OVERRIDDEN;
4796
4797         switch (scsi_debug_sector_size) {
4798         case  512:
4799         case 1024:
4800         case 2048:
4801         case 4096:
4802                 break;
4803         default:
4804                 pr_err("%s: invalid sector_size %d\n", __func__,
4805                        scsi_debug_sector_size);
4806                 return -EINVAL;
4807         }
4808
4809         switch (scsi_debug_dif) {
4810
4811         case SD_DIF_TYPE0_PROTECTION:
4812         case SD_DIF_TYPE1_PROTECTION:
4813         case SD_DIF_TYPE2_PROTECTION:
4814         case SD_DIF_TYPE3_PROTECTION:
4815                 break;
4816
4817         default:
4818                 pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
4819                 return -EINVAL;
4820         }
4821
4822         if (scsi_debug_guard > 1) {
4823                 pr_err("%s: guard must be 0 or 1\n", __func__);
4824                 return -EINVAL;
4825         }
4826
4827         if (scsi_debug_ato > 1) {
4828                 pr_err("%s: ato must be 0 or 1\n", __func__);
4829                 return -EINVAL;
4830         }
4831
4832         if (scsi_debug_physblk_exp > 15) {
4833                 pr_err("%s: invalid physblk_exp %u\n", __func__,
4834                        scsi_debug_physblk_exp);
4835                 return -EINVAL;
4836         }
4837
4838         if (scsi_debug_lowest_aligned > 0x3fff) {
4839                 pr_err("%s: lowest_aligned too big: %u\n", __func__,
4840                        scsi_debug_lowest_aligned);
4841                 return -EINVAL;
4842         }
4843
4844         if (scsi_debug_dev_size_mb < 1)
4845                 scsi_debug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
4846         sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
4847         sdebug_store_sectors = sz / scsi_debug_sector_size;
4848         sdebug_capacity = get_sdebug_capacity();
4849
4850         /* play around with geometry, don't waste too much on track 0 */
4851         sdebug_heads = 8;
4852         sdebug_sectors_per = 32;
4853         if (scsi_debug_dev_size_mb >= 256)
4854                 sdebug_heads = 64;
4855         else if (scsi_debug_dev_size_mb >= 16)
4856                 sdebug_heads = 32;
4857         sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4858                                (sdebug_sectors_per * sdebug_heads);
4859         if (sdebug_cylinders_per >= 1024) {
4860                 /* other LLDs do this; implies >= 1GB ram disk ... */
4861                 sdebug_heads = 255;
4862                 sdebug_sectors_per = 63;
4863                 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
4864                                (sdebug_sectors_per * sdebug_heads);
4865         }
4866
4867         if (0 == scsi_debug_fake_rw) {
4868                 fake_storep = vmalloc(sz);
4869                 if (NULL == fake_storep) {
4870                         pr_err("%s: out of memory, 1\n", __func__);
4871                         return -ENOMEM;
4872                 }
4873                 memset(fake_storep, 0, sz);
4874                 if (scsi_debug_num_parts > 0)
4875                         sdebug_build_parts(fake_storep, sz);
4876         }
4877
4878         if (scsi_debug_dix) {
4879                 int dif_size;
4880
4881                 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
4882                 dif_storep = vmalloc(dif_size);
4883
4884                 pr_info("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
4885                         dif_storep);
4886
4887                 if (dif_storep == NULL) {
4888                         pr_err("%s: out of mem. (DIX)\n", __func__);
4889                         ret = -ENOMEM;
4890                         goto free_vm;
4891                 }
4892
4893                 memset(dif_storep, 0xff, dif_size);
4894         }
4895
4896         /* Logical Block Provisioning */
4897         if (scsi_debug_lbp()) {
4898                 scsi_debug_unmap_max_blocks =
4899                         clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
4900
4901                 scsi_debug_unmap_max_desc =
4902                         clamp(scsi_debug_unmap_max_desc, 0U, 256U);
4903
4904                 scsi_debug_unmap_granularity =
4905                         clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
4906
4907                 if (scsi_debug_unmap_alignment &&
4908                     scsi_debug_unmap_granularity <=
4909                     scsi_debug_unmap_alignment) {
4910                         pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
4911                                __func__);
4912                         return -EINVAL;
4913                 }
4914
4915                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
4916                 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
4917
4918                 pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
4919
4920                 if (map_storep == NULL) {
4921                         pr_err("%s: out of mem. (MAP)\n", __func__);
4922                         ret = -ENOMEM;
4923                         goto free_vm;
4924                 }
4925
4926                 bitmap_zero(map_storep, map_size);
4927
4928                 /* Map first 1KB for partition table */
4929                 if (scsi_debug_num_parts)
4930                         map_region(0, 2);
4931         }
4932
4933         pseudo_primary = root_device_register("pseudo_0");
4934         if (IS_ERR(pseudo_primary)) {
4935                 pr_warn("%s: root_device_register() error\n", __func__);
4936                 ret = PTR_ERR(pseudo_primary);
4937                 goto free_vm;
4938         }
4939         ret = bus_register(&pseudo_lld_bus);
4940         if (ret < 0) {
4941                 pr_warn("%s: bus_register error: %d\n", __func__, ret);
4942                 goto dev_unreg;
4943         }
4944         ret = driver_register(&sdebug_driverfs_driver);
4945         if (ret < 0) {
4946                 pr_warn("%s: driver_register error: %d\n", __func__, ret);
4947                 goto bus_unreg;
4948         }
4949
4950         host_to_add = scsi_debug_add_host;
4951         scsi_debug_add_host = 0;
4952
4953         for (k = 0; k < host_to_add; k++) {
4954                 if (sdebug_add_adapter()) {
4955                         pr_err("%s: sdebug_add_adapter failed k=%d\n",
4956                                 __func__, k);
4957                         break;
4958                 }
4959         }
4960
4961         if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
4962                 pr_info("%s: built %d host(s)\n", __func__,
4963                         scsi_debug_add_host);
4964         }
4965         return 0;
4966
4967 bus_unreg:
4968         bus_unregister(&pseudo_lld_bus);
4969 dev_unreg:
4970         root_device_unregister(pseudo_primary);
4971 free_vm:
4972         if (map_storep)
4973                 vfree(map_storep);
4974         if (dif_storep)
4975                 vfree(dif_storep);
4976         vfree(fake_storep);
4977
4978         return ret;
4979 }
4980
4981 static void __exit scsi_debug_exit(void)
4982 {
4983         int k = scsi_debug_add_host;
4984
4985         stop_all_queued();
4986         free_all_queued();
4987         for (; k; k--)
4988                 sdebug_remove_adapter();
4989         driver_unregister(&sdebug_driverfs_driver);
4990         bus_unregister(&pseudo_lld_bus);
4991         root_device_unregister(pseudo_primary);
4992
4993         if (dif_storep)
4994                 vfree(dif_storep);
             if (map_storep)
                     vfree(map_storep);
4995
4996         vfree(fake_storep);
4997 }
4998
4999 device_initcall(scsi_debug_init);
5000 module_exit(scsi_debug_exit);
5001
5002 static void sdebug_release_adapter(struct device * dev)
5003 {
5004         struct sdebug_host_info *sdbg_host;
5005
5006         sdbg_host = to_sdebug_host(dev);
5007         kfree(sdbg_host);
5008 }
5009
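/*
 * Create one simulated adapter: allocate its host info, pre-create
 * num_tgts * max_luns device-info entries, link the host into
 * sdebug_host_list and register it as a device on pseudo_lld_bus with
 * pseudo_primary as parent. On failure the partially built device list
 * is freed and the error is returned.
 */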
5010 static int sdebug_add_adapter(void)
5011 {
5012         int k, devs_per_host;
5013         int error = 0;
5014         struct sdebug_host_info *sdbg_host;
5015         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5016
5017         sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
5018         if (NULL == sdbg_host) {
5019                 printk(KERN_ERR "%s: out of memory at line %d\n",
5020                        __func__, __LINE__);
5021                 return -ENOMEM;
5022         }
5023
5024         INIT_LIST_HEAD(&sdbg_host->dev_info_list);
5025
5026         devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
5027         for (k = 0; k < devs_per_host; k++) {
5028                 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
5029                 if (!sdbg_devinfo) {
5030                         printk(KERN_ERR "%s: out of memory at line %d\n",
5031                                __func__, __LINE__);
5032                         error = -ENOMEM;
5033                         goto clean;
5034                 }
5035         }
5036
5037         spin_lock(&sdebug_host_list_lock);
5038         list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
5039         spin_unlock(&sdebug_host_list_lock);
5040
5041         sdbg_host->dev.bus = &pseudo_lld_bus;
5042         sdbg_host->dev.parent = pseudo_primary;
5043         sdbg_host->dev.release = &sdebug_release_adapter;
5044         dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
5045
5046         error = device_register(&sdbg_host->dev);
5047
5048         if (error)
5049                 goto clean;
5050
5051         ++scsi_debug_add_host;
5052         return error;
5053
5054 clean:
5055         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5056                                  dev_list) {
5057                 list_del(&sdbg_devinfo->dev_list);
5058                 kfree(sdbg_devinfo);
5059         }
5060
5061         kfree(sdbg_host);
5062         return error;
5063 }
5064
5065 static void sdebug_remove_adapter(void)
5066 {
5067         struct sdebug_host_info * sdbg_host = NULL;
5068
5069         spin_lock(&sdebug_host_list_lock);
5070         if (!list_empty(&sdebug_host_list)) {
5071                 sdbg_host = list_entry(sdebug_host_list.prev,
5072                                        struct sdebug_host_info, host_list);
5073                 list_del(&sdbg_host->host_list);
5074         }
5075         spin_unlock(&sdebug_host_list_lock);
5076
5077         if (!sdbg_host)
5078                 return;
5079
5080         device_unregister(&sdbg_host->dev);
5081         --scsi_debug_add_host;
5082 }
5083
5084 static int
5085 sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
5086 {
5087         int num_in_q = 0;
5088         unsigned long iflags;
5089         struct sdebug_dev_info *devip;
5090
5091         spin_lock_irqsave(&queued_arr_lock, iflags);
5092         devip = (struct sdebug_dev_info *)sdev->hostdata;
5093         if (NULL == devip) {
5094                 spin_unlock_irqrestore(&queued_arr_lock, iflags);
5095                 return  -ENODEV;
5096         }
5097         num_in_q = atomic_read(&devip->num_in_q);
5098         spin_unlock_irqrestore(&queued_arr_lock, iflags);
5099
5100         if (qdepth < 1)
5101                 qdepth = 1;
5102         /* allow qdepth to exceed the number of queued_arr elements, for testing */
5103         if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
5104                 qdepth = SCSI_DEBUG_CANQUEUE + 10;
5105         scsi_change_queue_depth(sdev, qdepth);
5106
5107         if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
5108                 sdev_printk(KERN_INFO, sdev,
5109                             "%s: qdepth=%d, num_in_q=%d\n",
5110                             __func__, qdepth, num_in_q);
5111         }
5112         return sdev->queue_depth;
5113 }
5114
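/*
 * Called for every command when every_nth is non-zero. Once the running
 * command count reaches |every_nth| the counter is reset and, depending
 * on scsi_debug_opts, either the command is dropped (returning 1 so the
 * caller ignores it, provoking a timeout) or one of the error-injection
 * flags is latched in the command's private data for the response code
 * to act on.
 */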
5115 static int
5116 check_inject(struct scsi_cmnd *scp)
5117 {
5118         struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
5119
5120         memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
5121
5122         if (atomic_inc_return(&sdebug_cmnd_count) >=
5123             abs(scsi_debug_every_nth)) {
5124                 atomic_set(&sdebug_cmnd_count, 0);
5125                 if (scsi_debug_every_nth < -1)
5126                         scsi_debug_every_nth = -1;
5127                 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
5128                         return 1; /* ignore command causing timeout */
5129                 else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
5130                          scsi_medium_access_command(scp))
5131                         return 1; /* time out reads and writes */
5132                 if (sdebug_any_injecting_opt) {
5133                         int opts = scsi_debug_opts;
5134
5135                         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5136                                 ep->inj_recovered = true;
5137                         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5138                                 ep->inj_transport = true;
5139                         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5140                                 ep->inj_dif = true;
5141                         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5142                                 ep->inj_dix = true;
5143                         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5144                                 ep->inj_short = true;
5145                 }
5146         }
5147         return 0;
5148 }
5149
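/*
 * Main command dispatch. The opcode indexes opcode_ind_arr to find an
 * opcode_info_t entry; commands sharing an opcode are then distinguished
 * by service action. Unit attentions, the optional "strict" CDB mask
 * check and the stopped/fake_rw states are handled before the entry's
 * resp_* handler (oip->pfp) is invoked.
 */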
5150 static int
5151 scsi_debug_queuecommand(struct scsi_cmnd *scp)
5152 {
5153         u8 sdeb_i;
5154         struct scsi_device *sdp = scp->device;
5155         const struct opcode_info_t *oip;
5156         const struct opcode_info_t *r_oip;
5157         struct sdebug_dev_info *devip;
5158         u8 *cmd = scp->cmnd;
5159         int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
5160         int k, na;
5161         int errsts = 0;
5162         int errsts_no_connect = DID_NO_CONNECT << 16;
5163         u32 flags;
5164         u16 sa;
5165         u8 opcode = cmd[0];
5166         bool has_wlun_rl;
5167         bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
5168
5169         scsi_set_resid(scp, 0);
5170         if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
5171                 char b[120];
5172                 int n, len, sb;
5173
5174                 len = scp->cmd_len;
5175                 sb = (int)sizeof(b);
5176                 if (len > 32)
5177                         strcpy(b, "too long, over 32 bytes");
5178                 else {
5179                         for (k = 0, n = 0; k < len && n < sb; ++k)
5180                                 n += scnprintf(b + n, sb - n, "%02x ",
5181                                                (u32)cmd[k]);
5182                 }
5183                 sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
5184         }
5185         has_wlun_rl = (sdp->lun == SAM2_WLUN_REPORT_LUNS);
5186         if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
5187                 return schedule_resp(scp, NULL, errsts_no_connect, 0);
5188
5189         sdeb_i = opcode_ind_arr[opcode];        /* fully mapped */
5190         oip = &opcode_info_arr[sdeb_i];         /* safe if table consistent */
5191         devip = (struct sdebug_dev_info *)sdp->hostdata;
5192         if (!devip) {
5193                 devip = devInfoReg(sdp);
5194                 if (NULL == devip)
5195                         return schedule_resp(scp, NULL, errsts_no_connect, 0);
5196         }
5197         na = oip->num_attached;
5198         r_pfp = oip->pfp;
5199         if (na) {       /* multiple commands with this opcode */
5200                 r_oip = oip;
5201                 if (FF_SA & r_oip->flags) {
5202                         if (F_SA_LOW & oip->flags)
5203                                 sa = 0x1f & cmd[1];
5204                         else
5205                                 sa = get_unaligned_be16(cmd + 8);
5206                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5207                                 if (opcode == oip->opcode && sa == oip->sa)
5208                                         break;
5209                         }
5210                 } else {   /* no service action, so match on opcode alone */
5211                         for (k = 0; k <= na; oip = r_oip->arrp + k++) {
5212                                 if (opcode == oip->opcode)
5213                                         break;
5214                         }
5215                 }
5216                 if (k > na) {
5217                         if (F_SA_LOW & r_oip->flags)
5218                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
5219                         else if (F_SA_HIGH & r_oip->flags)
5220                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
5221                         else
5222                                 mk_sense_invalid_opcode(scp);
5223                         goto check_cond;
5224                 }
5225         }       /* else (when na==0) we assume the oip is a match */
5226         flags = oip->flags;
5227         if (F_INV_OP & flags) {
5228                 mk_sense_invalid_opcode(scp);
5229                 goto check_cond;
5230         }
5231         if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
5232                 if (debug)
5233                         sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
5234                                     "0x%x not supported for wlun\n", opcode);
5235                 mk_sense_invalid_opcode(scp);
5236                 goto check_cond;
5237         }
5238         if (scsi_debug_strict) {        /* check cdb against mask */
5239                 u8 rem;
5240                 int j;
5241
5242                 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
5243                         rem = ~oip->len_mask[k] & cmd[k];
5244                         if (rem) {
5245                                 for (j = 7; j >= 0; --j, rem <<= 1) {
5246                                         if (0x80 & rem)
5247                                                 break;
5248                                 }
5249                                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
5250                                 goto check_cond;
5251                         }
5252                 }
5253         }
5254         if (!(F_SKIP_UA & flags) &&
5255             SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
5256                 errsts = check_readiness(scp, UAS_ONLY, devip);
5257                 if (errsts)
5258                         goto check_cond;
5259         }
5260         if ((F_M_ACCESS & flags) && devip->stopped) {
5261                 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
5262                 if (debug)
5263                         sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
5264                                     "%s\n", my_name, "initializing command "
5265                                     "required");
5266                 errsts = check_condition_result;
5267                 goto fini;
5268         }
5269         if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
5270                 goto fini;
5271         if (scsi_debug_every_nth) {
5272                 if (check_inject(scp))
5273                         return 0;       /* ignore command: make trouble */
5274         }
5275         if (oip->pfp)   /* if this command has a resp_* function, call it */
5276                 errsts = oip->pfp(scp, devip);
5277         else if (r_pfp) /* if leaf function ptr NULL, try the root's */
5278                 errsts = r_pfp(scp, devip);
5279
5280 fini:
5281         return schedule_resp(scp, devip, errsts,
5282                              ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
5283 check_cond:
5284         return schedule_resp(scp, devip, check_condition_result, 0);
5285 }
5286
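/*
 * Entry point registered in the host template: optionally serializes
 * scsi_debug_queuecommand() under the host lock when scsi_debug_host_lock
 * is set.
 */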
5287 static int
5288 sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
5289 {
5290         if (scsi_debug_host_lock) {
5291                 unsigned long iflags;
5292                 int rc;
5293
5294                 spin_lock_irqsave(shost->host_lock, iflags);
5295                 rc = scsi_debug_queuecommand(cmd);
5296                 spin_unlock_irqrestore(shost->host_lock, iflags);
5297                 return rc;
5298         } else
5299                 return scsi_debug_queuecommand(cmd);
5300 }
5301
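/* Host template wiring the simulated LLD entry points into the SCSI mid-layer. */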
5302 static struct scsi_host_template sdebug_driver_template = {
5303         .show_info =            scsi_debug_show_info,
5304         .write_info =           scsi_debug_write_info,
5305         .proc_name =            sdebug_proc_name,
5306         .name =                 "SCSI DEBUG",
5307         .info =                 scsi_debug_info,
5308         .slave_alloc =          scsi_debug_slave_alloc,
5309         .slave_configure =      scsi_debug_slave_configure,
5310         .slave_destroy =        scsi_debug_slave_destroy,
5311         .ioctl =                scsi_debug_ioctl,
5312         .queuecommand =         sdebug_queuecommand_lock_or_not,
5313         .change_queue_depth =   sdebug_change_qdepth,
5314         .eh_abort_handler =     scsi_debug_abort,
5315         .eh_device_reset_handler = scsi_debug_device_reset,
5316         .eh_target_reset_handler = scsi_debug_target_reset,
5317         .eh_bus_reset_handler = scsi_debug_bus_reset,
5318         .eh_host_reset_handler = scsi_debug_host_reset,
5319         .can_queue =            SCSI_DEBUG_CANQUEUE,
5320         .this_id =              7,
5321         .sg_tablesize =         SCSI_MAX_SG_CHAIN_SEGMENTS,
5322         .cmd_per_lun =          DEF_CMD_PER_LUN,
5323         .max_sectors =          -1U,
5324         .use_clustering =       DISABLE_CLUSTERING,
5325         .module =               THIS_MODULE,
5326         .track_queue_depth =    1,
5327         .cmd_size =             sizeof(struct sdebug_scmd_extra_t),
5328 };
5329
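/*
 * Probe one pseudo adapter: allocate a Scsi_Host, size its target/LUN space
 * from the module parameters, advertise DIF/DIX protection and guard
 * capabilities, then register and scan the host.
 */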
5330 static int sdebug_driver_probe(struct device * dev)
5331 {
5332         int error = 0;
5333         int opts;
5334         struct sdebug_host_info *sdbg_host;
5335         struct Scsi_Host *hpnt;
5336         int host_prot;
5337
5338         sdbg_host = to_sdebug_host(dev);
5339
5340         sdebug_driver_template.can_queue = scsi_debug_max_queue;
5341         if (scsi_debug_clustering)
5342                 sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
5343         hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
5344         if (NULL == hpnt) {
5345                 pr_err("%s: scsi_host_alloc failed\n", __func__);
5346                 error = -ENODEV;
5347                 return error;
5348         }
5349
5350         sdbg_host->shost = hpnt;
5351         *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
5352         if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
5353                 hpnt->max_id = scsi_debug_num_tgts + 1;
5354         else
5355                 hpnt->max_id = scsi_debug_num_tgts;
5356         hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;  /* not scsi_debug_max_luns, so the REPORT LUNS well-known LUN is scannable */
5357
5358         host_prot = 0;
5359
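        /* map the scsi_debug_dif/scsi_debug_dix settings onto SHOST_* protection flags */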
5360         switch (scsi_debug_dif) {
5361
5362         case SD_DIF_TYPE1_PROTECTION:
5363                 host_prot = SHOST_DIF_TYPE1_PROTECTION;
5364                 if (scsi_debug_dix)
5365                         host_prot |= SHOST_DIX_TYPE1_PROTECTION;
5366                 break;
5367
5368         case SD_DIF_TYPE2_PROTECTION:
5369                 host_prot = SHOST_DIF_TYPE2_PROTECTION;
5370                 if (scsi_debug_dix)
5371                         host_prot |= SHOST_DIX_TYPE2_PROTECTION;
5372                 break;
5373
5374         case SD_DIF_TYPE3_PROTECTION:
5375                 host_prot = SHOST_DIF_TYPE3_PROTECTION;
5376                 if (scsi_debug_dix)
5377                         host_prot |= SHOST_DIX_TYPE3_PROTECTION;
5378                 break;
5379
5380         default:
5381                 if (scsi_debug_dix)
5382                         host_prot |= SHOST_DIX_TYPE0_PROTECTION;
5383                 break;
5384         }
5385
5386         scsi_host_set_prot(hpnt, host_prot);
5387
5388         printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
5389                (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5390                (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5391                (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5392                (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5393                (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5394                (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5395                (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5396
5397         if (scsi_debug_guard == 1)
5398                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
5399         else
5400                 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
5401
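        /*
         * Remember whether any error-injection option is enabled so the
         * per-command path (check_inject) only does the detailed checks
         * when needed.
         */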
5402         opts = scsi_debug_opts;
5403         if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
5404                 sdebug_any_injecting_opt = true;
5405         else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
5406                 sdebug_any_injecting_opt = true;
5407         else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
5408                 sdebug_any_injecting_opt = true;
5409         else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
5410                 sdebug_any_injecting_opt = true;
5411         else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
5412                 sdebug_any_injecting_opt = true;
5413
5414         error = scsi_add_host(hpnt, &sdbg_host->dev);
5415         if (error) {
5416                 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
5417                 error = -ENODEV;
5418                 scsi_host_put(hpnt);
5419         } else
5420                 scsi_scan_host(hpnt);
5421
5422         return error;
5423 }
5424
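/* Tear down one pseudo adapter: unregister the host and free its per-LUN state. */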
5425 static int sdebug_driver_remove(struct device * dev)
5426 {
5427         struct sdebug_host_info *sdbg_host;
5428         struct sdebug_dev_info *sdbg_devinfo, *tmp;
5429
5430         sdbg_host = to_sdebug_host(dev);
5431
5432         if (!sdbg_host) {
5433                 printk(KERN_ERR "%s: Unable to locate host info\n",
5434                        __func__);
5435                 return -ENODEV;
5436         }
5437
5438         scsi_remove_host(sdbg_host->shost);
5439
5440         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
5441                                  dev_list) {
5442                 list_del(&sdbg_devinfo->dev_list);
5443                 kfree(sdbg_devinfo);
5444         }
5445
5446         scsi_host_put(sdbg_host->shost);
5447         return 0;
5448 }
5449
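/* The pseudo bus matches any driver to any device, so probe/remove always run. */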
5450 static int pseudo_lld_bus_match(struct device *dev,
5451                                 struct device_driver *dev_driver)
5452 {
5453         return 1;
5454 }
5455
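/* Pseudo bus on which the simulated adapters hang; probe/remove manage the Scsi_Host lifetime. */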
5456 static struct bus_type pseudo_lld_bus = {
5457         .name = "pseudo",
5458         .match = pseudo_lld_bus_match,
5459         .probe = sdebug_driver_probe,
5460         .remove = sdebug_driver_remove,
5461         .drv_groups = sdebug_drv_groups,
5462 };