kernel/drivers/scsi/ipr.c
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *      - Ultra 320 SCSI controller
 *      - PCI-X host interface
 *      - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *      - Non-Volatile Write Cache
 *      - Supports attachment of non-RAID disks, tape, and optical devices
 *      - RAID Levels 0, 5, 10
 *      - Hot spare
 *      - Background Parity Checking
 *      - Background Data Scrubbing
 *      - Ability to increase the capacity of an existing RAID 5 disk array
 *              by adding disks
 *
 * Driver Features:
 *      - Tagged command queuing
 *      - Adapter microcode download
 *      - PCI hot plug
 *      - SCSI device hot plug
 *
 */
56
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/slab.h>
63 #include <linux/vmalloc.h>
64 #include <linux/ioport.h>
65 #include <linux/delay.h>
66 #include <linux/pci.h>
67 #include <linux/wait.h>
68 #include <linux/spinlock.h>
69 #include <linux/sched.h>
70 #include <linux/interrupt.h>
71 #include <linux/blkdev.h>
72 #include <linux/firmware.h>
73 #include <linux/module.h>
74 #include <linux/moduleparam.h>
75 #include <linux/libata.h>
76 #include <linux/hdreg.h>
77 #include <linux/reboot.h>
78 #include <linux/stringify.h>
79 #include <asm/io.h>
80 #include <asm/irq.h>
81 #include <asm/processor.h>
82 #include <scsi/scsi.h>
83 #include <scsi/scsi_host.h>
84 #include <scsi/scsi_tcq.h>
85 #include <scsi/scsi_eh.h>
86 #include <scsi/scsi_cmnd.h>
87 #include "ipr.h"
88
/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
        { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
                .mailbox = 0x0042C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x0022C,
                        .clr_interrupt_mask_reg = 0x00230,
                        .clr_interrupt_mask_reg32 = 0x00230,
                        .sense_interrupt_mask_reg = 0x0022C,
                        .sense_interrupt_mask_reg32 = 0x0022C,
                        .clr_interrupt_reg = 0x00228,
                        .clr_interrupt_reg32 = 0x00228,
                        .sense_interrupt_reg = 0x00224,
                        .sense_interrupt_reg32 = 0x00224,
                        .ioarrin_reg = 0x00404,
                        .sense_uproc_interrupt_reg = 0x00214,
                        .sense_uproc_interrupt_reg32 = 0x00214,
                        .set_uproc_interrupt_reg = 0x00214,
                        .set_uproc_interrupt_reg32 = 0x00214,
                        .clr_uproc_interrupt_reg = 0x00218,
                        .clr_uproc_interrupt_reg32 = 0x00218
                }
        },
        { /* Snipe and Scamp */
                .mailbox = 0x0052C,
                .max_cmds = 100,
                .cache_line_size = 0x20,
                .clear_isr = 1,
                .iopoll_weight = 0,
                {
                        .set_interrupt_mask_reg = 0x00288,
                        .clr_interrupt_mask_reg = 0x0028C,
                        .clr_interrupt_mask_reg32 = 0x0028C,
                        .sense_interrupt_mask_reg = 0x00288,
                        .sense_interrupt_mask_reg32 = 0x00288,
                        .clr_interrupt_reg = 0x00284,
                        .clr_interrupt_reg32 = 0x00284,
                        .sense_interrupt_reg = 0x00280,
                        .sense_interrupt_reg32 = 0x00280,
                        .ioarrin_reg = 0x00504,
                        .sense_uproc_interrupt_reg = 0x00290,
                        .sense_uproc_interrupt_reg32 = 0x00290,
                        .set_uproc_interrupt_reg = 0x00290,
                        .set_uproc_interrupt_reg32 = 0x00290,
                        .clr_uproc_interrupt_reg = 0x00294,
                        .clr_uproc_interrupt_reg32 = 0x00294
                }
        },
        { /* CRoC */
                .mailbox = 0x00044,
                .max_cmds = 1000,
                .cache_line_size = 0x20,
                .clear_isr = 0,
                .iopoll_weight = 64,
                {
                        .set_interrupt_mask_reg = 0x00010,
                        .clr_interrupt_mask_reg = 0x00018,
                        .clr_interrupt_mask_reg32 = 0x0001C,
                        .sense_interrupt_mask_reg = 0x00010,
                        .sense_interrupt_mask_reg32 = 0x00014,
                        .clr_interrupt_reg = 0x00008,
                        .clr_interrupt_reg32 = 0x0000C,
                        .sense_interrupt_reg = 0x00000,
                        .sense_interrupt_reg32 = 0x00004,
                        .ioarrin_reg = 0x00070,
                        .sense_uproc_interrupt_reg = 0x00020,
                        .sense_uproc_interrupt_reg32 = 0x00024,
                        .set_uproc_interrupt_reg = 0x00020,
                        .set_uproc_interrupt_reg32 = 0x00024,
                        .clr_uproc_interrupt_reg = 0x00028,
                        .clr_uproc_interrupt_reg32 = 0x0002C,
                        .init_feedback_reg = 0x0005C,
                        .dump_addr_reg = 0x00064,
                        .dump_data_reg = 0x00068,
                        .endian_swap_reg = 0x00084
                }
        },
};

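/*
 * This table maps each supported PCI vendor/device ID to its interrupt
 * type, SIS generation, config space access method and register layout.
 */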
static const struct ipr_chip_t ipr_chip[] = {
        { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

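/* Indexed by the max_speed module parameter (0-2) */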
static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
                 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
        {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8155: An unknown error was received"},
        {0x00330000, 0, 0,
        "Soft underlength error"},
        {0x005A0000, 0, 0,
        "Command to be cancelled not found"},
        {0x00808000, 0, 0,
        "Qualified success"},
        {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Soft device bus error recovered by the IOA"},
        {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4101: Soft device bus fabric error"},
        {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block guard error recovered by the device"},
        {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFC: Logical block reference tag error recovered by the device"},
        {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered scatter list tag / sequence number error"},
        {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
        {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4171: Recovered logical block sequence number error on IOA to Host transfer"},
        {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Recovered logical block reference tag error detected by the IOA"},
        {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFD: Logical block guard error recovered by the IOA"},
        {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Device sector reassign successful"},
        {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by device rewrite procedures"},
        {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
        "7001: IOA sector reassignment successful"},
        {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF9: Soft media error. Sector reassignment recommended"},
        {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF7: Media error recovered by IOA rewrite procedures"},
        {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft PCI bus error recovered by the IOA"},
        {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the IOA"},
        {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device hardware error recovered by the device"},
        {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "FF3D: Soft IOA error recovered by the IOA"},
        {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFA: Undefined device response recovered by the IOA"},
        {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Device bus error, message or command phase"},
        {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFE: Task Management Function failed"},
        {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF6: Failure prediction threshold exceeded"},
        {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8009: Impending cache battery pack failure"},
        {0x02040100, 0, 0,
        "Logical Unit in process of becoming ready"},
        {0x02040200, 0, 0,
        "Initializing command required"},
        {0x02040400, 0, 0,
        "34FF: Disk device format in progress"},
        {0x02040C00, 0, 0,
        "Logical unit not accessible, target port in unavailable state"},
        {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9070: IOA requested reset"},
        {0x023F0000, 0, 0,
        "Synchronization required"},
        {0x02408500, 0, 0,
        "IOA microcode download required"},
        {0x02408600, 0, 0,
        "Device bus connection is prohibited by host"},
        {0x024E0000, 0, 0,
        "Not ready, IOA shutdown"},
        {0x025A0000, 0, 0,
        "Not ready, IOA has been shutdown"},
        {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: Storage subsystem configuration error"},
        {0x03110B00, 0, 0,
        "FFF5: Medium error, data unreadable, recommend reassign"},
        {0x03110C00, 0, 0,
        "7000: Medium error, data unreadable, do not reassign"},
        {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF3: Disk media format bad"},
        {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3002: Addressed device failed to respond to selection"},
        {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3100: Device bus error"},
        {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3109: IOA timed out a device command"},
        {0x04088000, 0, 0,
        "3120: SCSI bus is not operational"},
        {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4100: Hard device bus fabric error"},
        {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block guard error detected by the device"},
        {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
        "310C: Logical block reference tag error detected by the device"},
        {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Scatter list tag / sequence number error"},
        {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Logical block CRC error on IOA to Host transfer"},
        {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
        "4170: Logical block sequence number error on IOA to Host transfer"},
        {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block reference tag error detected by the IOA"},
        {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
        "310D: Logical block guard error detected by the IOA"},
        {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9000: IOA reserved area data check"},
        {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9001: IOA reserved area invalid data pattern"},
        {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9002: IOA reserved area LRC error"},
        {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
        "Hardware Error, IOA metadata access error"},
        {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
        "102E: Out of alternate sectors for disk storage"},
        {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer underlength error"},
        {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Data transfer overlength error"},
        {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "3400: Logical unit failure"},
        {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Device microcode is corrupt"},
        {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: PCI bus error"},
        {0x04430000, 1, 0,
        "Unsupported device bus message received"},
        {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Disk device problem"},
        {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
        "8150: Permanent IOA failure"},
        {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3010: Disk device returned wrong response to IOA"},
        {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
        "8151: IOA microcode error"},
        {0x04448500, 0, 0,
        "Device bus status error"},
        {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
        "8157: IOA error requiring IOA reset to recover"},
        {0x04448700, 0, 0,
        "ATA device status error"},
        {0x04490000, 0, 0,
        "Message reject received from the device"},
        {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
        "8008: A permanent cache battery pack failure occurred"},
        {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9090: Disk unit has been modified after the last known status"},
        {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9081: IOA detected device error"},
        {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
        "9082: IOA detected device error"},
        {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: Device bus error, message or command phase"},
        {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
        "3110: SAS Command / Task Management Function failed"},
        {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9091: Incorrect hardware configuration change has been detected"},
        {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9073: Invalid multi-adapter configuration"},
        {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4010: Incorrect connection between cascaded expanders"},
        {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4020: Connections exceed IOA design limits"},
        {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4030: Incorrect multipath connection"},
        {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4110: Unsupported enclosure function"},
        {0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4120: SAS cable VPD cannot be read"},
        {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFF4: Command to logical unit failed"},
        {0x05240000, 1, 0,
        "Illegal request, invalid request type or request packet"},
        {0x05250000, 0, 0,
        "Illegal request, invalid resource handle"},
        {0x05258000, 0, 0,
        "Illegal request, commands not allowed to this device"},
        {0x05258100, 0, 0,
        "Illegal request, command not allowed to a secondary adapter"},
        {0x05258200, 0, 0,
        "Illegal request, command not allowed to a non-optimized resource"},
        {0x05260000, 0, 0,
        "Illegal request, invalid field in parameter list"},
        {0x05260100, 0, 0,
        "Illegal request, parameter not supported"},
        {0x05260200, 0, 0,
        "Illegal request, parameter value invalid"},
        {0x052C0000, 0, 0,
        "Illegal request, command sequence error"},
        {0x052C8000, 1, 0,
        "Illegal request, dual adapter support not enabled"},
        {0x052C8100, 1, 0,
        "Illegal request, another cable connector was physically disabled"},
        {0x054E8000, 1, 0,
        "Illegal request, inconsistent group id/group count"},
        {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9031: Array protection temporarily suspended, protection resuming"},
        {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9040: Array protection temporarily suspended, protection resuming"},
        {0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4080: IOA exceeded maximum operating temperature"},
        {0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4085: Service required"},
        {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3140: Device bus not ready to ready transition"},
        {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset"},
        {0x06290500, 0, 0,
        "FFFE: SCSI bus transition to single ended"},
        {0x06290600, 0, 0,
        "FFFE: SCSI bus transition to LVD"},
        {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
        "FFFB: SCSI bus was reset by another initiator"},
        {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
        "3029: A device replacement has occurred"},
        {0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4102: Device bus fabric performance degradation"},
        {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9051: IOA cache data exists for a missing or failed device"},
        {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
        {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9025: Disk unit is not supported at its physical location"},
        {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
        "3020: IOA detected a SCSI bus configuration error"},
        {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
        "3150: SCSI bus configuration error"},
        {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9074: Asymmetric advanced function disk configuration"},
        {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
        "4040: Incomplete multipath connection between IOA and enclosure"},
        {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
        "4041: Incomplete multipath connection between enclosure and device"},
        {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9075: Incomplete multipath connection between IOA and remote IOA"},
        {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9076: Configuration error, missing remote IOA"},
        {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4050: Enclosure does not support a required multipath function"},
        {0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
        "4121: Configuration error, required cable is missing"},
        {0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
        "4122: Cable is not plugged into the correct location on remote IOA"},
        {0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4123: Configuration error, invalid cable vital product data"},
        {0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "4124: Configuration error, both cable ends are plugged into the same IOA"},
        {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
        "4070: Logically bad block written on device"},
        {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9041: Array protection temporarily suspended"},
        {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9042: Corrupt array parity detected on specified device"},
        {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9030: Array no longer protected due to missing or failed disk unit"},
        {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9071: Link operational transition"},
        {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9072: Link not operational transition"},
        {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9032: Array exposed but still protected"},
        {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
        "70DD: Device forced failed by disrupt device command"},
        {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
        "4061: Multipath redundancy level got better"},
        {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
        "4060: Multipath redundancy level got worse"},
        {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9083: Device raw mode enabled"},
        {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9084: Device raw mode disabled"},
        {0x07270000, 0, 0,
        "Failure due to other device"},
        {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
        "9008: IOA does not support functions expected by devices"},
        {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9010: Cache data associated with attached devices cannot be found"},
        {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9011: Cache data belongs to devices other than those attached"},
        {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
        "9020: Array missing 2 or more devices with only 1 device present"},
        {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
        "9021: Array missing 2 or more devices with 2 or more devices present"},
        {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9022: Exposed array is missing a required device"},
        {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9023: Array member(s) not at required physical locations"},
        {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9024: Array not functional due to present hardware configuration"},
        {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9026: Array not functional due to present hardware configuration"},
        {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9027: Array is missing a device and parity is out of sync"},
        {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9028: Maximum number of arrays already exist"},
        {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9050: Required cache data cannot be located for a disk unit"},
        {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9052: Cache data exists for a device that has been modified"},
        {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
        "9054: IOA resources not available due to previous problems"},
        {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
        "9092: Disk unit requires initialization before use"},
        {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
        "9029: Incorrect hardware configuration change has been detected"},
        {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
        "9060: One or more disk pairs are missing from an array"},
        {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
        "9061: One or more disks are missing from an array"},
        {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
        "9062: One or more disks are missing from an array"},
        {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
        "9063: Maximum number of functional arrays has been exceeded"},
        {0x07279A00, 0, 0,
        "Data protect, other volume set problem"},
        {0x0B260000, 0, 0,
        "Aborted command, invalid descriptor"},
        {0x0B3F9000, 0, 0,
        "Target operating conditions have changed, dual adapter takeover"},
        {0x0B530200, 0, 0,
        "Aborted command, medium removal prevented"},
        {0x0B5A0000, 0, 0,
        "Command terminated by host"},
        {0x0B5B8000, 0, 0,
        "Aborted command, command terminated by host"}
};

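/*
 * SES enclosure product IDs and the maximum bus speed they support.
 * In the compare string, 'X' bytes are compared against the inquiry
 * product ID and '*' bytes are treated as wildcards.
 */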
static const struct ipr_ses_table_entry ipr_ses_table[] = {
        { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
        { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
        { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
        { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
        { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
        { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
        { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
        { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
                                   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:    ipr command struct
 * @type:               trace type
 * @add_data:   additional data
 *
 * Return value:
 *      none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
                         u8 type, u32 add_data)
{
        struct ipr_trace_entry *trace_entry;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

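        /* Atomically advance the trace index; the trace buffer is
           circular and wraps at IPR_NUM_TRACE_ENTRIES entries */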
        trace_entry = &ioa_cfg->trace[atomic_add_return
                        (1, &ioa_cfg->trace_index) % IPR_NUM_TRACE_ENTRIES];
        trace_entry->time = jiffies;
        trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
        trace_entry->type = type;
        if (ipr_cmd->ioa_cfg->sis64)
                trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
        else
                trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
        trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
        trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
        trace_entry->u.add_data = add_data;
        wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
        unsigned long lock_flags;
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
        ipr_cmd->done(ipr_cmd);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
        struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
        struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
        dma_addr_t dma_addr = ipr_cmd->dma_addr;
        int hrrq_id;

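        /* Preserve the hrrq_id across the cmd_pkt memset so the response
           is posted to the same host RRQ when the command is reused */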
        hrrq_id = ioarcb->cmd_pkt.hrrq_id;
        memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
        ioarcb->cmd_pkt.hrrq_id = hrrq_id;
        ioarcb->data_transfer_length = 0;
        ioarcb->read_data_transfer_length = 0;
        ioarcb->ioadl_len = 0;
        ioarcb->read_ioadl_len = 0;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioarcb->u.sis64_addr_data.data_ioadl_addr =
                        cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
                ioasa64->u.gata.status = 0;
        } else {
                ioarcb->write_ioadl_addr =
                        cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
                ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
                ioasa->u.gata.status = 0;
        }

        ioasa->hdr.ioasc = 0;
        ioasa->hdr.residual_data_len = 0;
        ipr_cmd->scsi_cmd = NULL;
        ipr_cmd->qc = NULL;
        ipr_cmd->sense_buffer[0] = 0;
        ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:    ipr command struct
 * @fast_done:  fast done function call-back
 *
 * Return value:
 *      none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
                              void (*fast_done) (struct ipr_cmnd *))
{
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
        ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:       hrr queue
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
        struct ipr_cmnd *ipr_cmd = NULL;

        if (likely(!list_empty(&hrrq->hrrq_free_q))) {
                ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
                        struct ipr_cmnd, queue);
                list_del(&ipr_cmd->queue);
        }

        return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd =
                __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
        ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
        return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:    ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *      none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                                          u32 clr_ints)
{
        volatile u32 int_reg;
        int i;

        /* Stop new interrupts */
        for (i = 0; i < ioa_cfg->hrrq_num; i++) {
                spin_lock(&ioa_cfg->hrrq[i]._lock);
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
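        /* Make the allow_interrupts updates visible before touching the
           hardware interrupt mask registers below */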
        wmb();

        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
                writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
        else
                writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

        /* Clear any pending interrupts */
        if (ioa_cfg->sis64)
                writel(~0, ioa_cfg->regs.clr_interrupt_reg);
        writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
        int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg == 0)
                return 0;

        if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
                return -EIO;
        }

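        /* Always enable data parity error recovery and relaxed ordering
           in the saved value that will be restored on reset */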
        ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
        return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:    ioa config struct
 *
 * Return value:
 *      0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
        int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

        if (pcix_cmd_reg) {
                if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
                                          ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
                        dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
                        return -EIO;
                }
        }

        return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct ata_queued_cmd *qc = ipr_cmd->qc;
        struct ipr_sata_port *sata_port = qc->ap->private_data;

        qc->err_mask |= AC_ERR_OTHER;
        sata_port->ioasa.status |= ATA_BUSY;
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
        ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:    ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *      none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

        scsi_cmd->result |= (DID_ERROR << 16);

        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
        if (ipr_cmd->eh_comp)
                complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:    ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *      none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
        struct ipr_cmnd *ipr_cmd, *temp;
        struct ipr_hrr_queue *hrrq;

        ENTER;
        for_each_hrrq(hrrq, ioa_cfg) {
                spin_lock(&hrrq->_lock);
                list_for_each_entry_safe(ipr_cmd,
                                        temp, &hrrq->hrrq_pending_q, queue) {
                        list_del(&ipr_cmd->queue);

                        ipr_cmd->s.ioasa.hdr.ioasc =
                                cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
                        ipr_cmd->s.ioasa.hdr.ilid =
                                cpu_to_be32(IPR_DRIVER_ILID);

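                        /* Route completion through the appropriate
                           error handling done function */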
                        if (ipr_cmd->scsi_cmd)
                                ipr_cmd->done = ipr_scsi_eh_done;
                        else if (ipr_cmd->qc)
                                ipr_cmd->done = ipr_sata_eh_done;

                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
                                     IPR_IOASC_IOA_WAS_RESET);
                        del_timer(&ipr_cmd->timer);
                        ipr_cmd->done(ipr_cmd);
                }
                spin_unlock(&hrrq->_lock);
        }
        LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required, then OR in the
 * appropriate bits.
 *
 * Return value:
 *      none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

        if (ioa_cfg->sis64) {
                /* The default size is 256 bytes */
                send_dma_addr |= 0x1;

                /* If the number of ioadls * size of ioadl > 128 bytes,
                   then use a 512 byte ioarcb */
                if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
                        send_dma_addr |= 0x4;
                writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        } else {
                writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
        }
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:            ipr command struct
 * @done:                       done function
 * @timeout_func:       timeout function
 * @timeout:            timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *      none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
                       void (*done) (struct ipr_cmnd *),
                       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

        ipr_cmd->done = done;

        ipr_cmd->timer.data = (unsigned long) ipr_cmd;
        ipr_cmd->timer.expires = jiffies + timeout;
        ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

        add_timer(&ipr_cmd->timer);

        ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

        ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:    ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 *      none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
        if (ipr_cmd->sibling)
                ipr_cmd->sibling = NULL;
        else
                complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:    ipr command struct
 * @dma_addr:   dma address
 * @len:        transfer length
 * @flags:      ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 *      nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
                           u32 len, int flags)
{
        struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
        struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

        ipr_cmd->dma_use_sg = 1;

        if (ipr_cmd->ioa_cfg->sis64) {
                ioadl64->flags = cpu_to_be32(flags);
                ioadl64->data_len = cpu_to_be32(len);
                ioadl64->address = cpu_to_be64(dma_addr);

                ipr_cmd->ioarcb.ioadl_len =
                        cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
                ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
        } else {
                ioadl->flags_and_data_len = cpu_to_be32(flags | len);
                ioadl->address = cpu_to_be32(dma_addr);

                if (flags == IPR_IOADL_FLAGS_READ_LAST) {
                        ipr_cmd->ioarcb.read_ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
                } else {
                        ipr_cmd->ioarcb.ioadl_len =
                                cpu_to_be32(sizeof(struct ipr_ioadl_desc));
                        ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
                }
        }
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:    ipr command struct
 * @timeout_func:       function to invoke if command times out
 * @timeout:    timeout
 *
 * Return value:
 *      none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
                                  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
                                  u32 timeout)
{
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

        init_completion(&ipr_cmd->completion);
        ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

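        /* The caller holds the host lock; drop it while sleeping so the
           interrupt handler can complete the command */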
        spin_unlock_irq(ioa_cfg->host->host_lock);
        wait_for_completion(&ipr_cmd->completion);
        spin_lock_irq(ioa_cfg->host->host_lock);
}

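/**
 * ipr_get_hrrq_index - Pick an hrrq index for a new command
 * @ioa_cfg:    ioa config struct
 *
 * Round-robins across the HRR queues, reserving queue 0 for
 * initialization and internal commands when more than one
 * queue is configured.
 *
 * Return value:
 *      hrrq index
 **/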
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
        if (ioa_cfg->hrrq_num == 1)
                return 0;
        else
                return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:    ioa config struct
 * @type:               HCAM type
 * @hostrcb:    hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 *      none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
                          struct ipr_hostrcb *hostrcb)
{
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioarcb *ioarcb;

        if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
                list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

                ipr_cmd->u.hostrcb = hostrcb;
                ioarcb = &ipr_cmd->ioarcb;

                ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
                ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
                ioarcb->cmd_pkt.cdb[1] = type;
                ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
                ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

                ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
                               sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

                if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
                        ipr_cmd->done = ipr_process_ccn;
                else
                        ipr_cmd->done = ipr_process_error;

                ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

                ipr_send_command(ipr_cmd);
        } else {
                list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
        }
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:        resource entry struct
 * @proto:      cfgte device bus protocol value
 *
 * Return value:
 *      none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
        switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
                break;
        case IPR_PROTO_SATA_ATAPI:
        case IPR_PROTO_SAS_STP_ATAPI:
                res->ata_class = ATA_DEV_ATAPI;
                break;
        default:
                res->ata_class = ATA_DEV_UNKNOWN;
                break;
        }
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
                               struct ipr_config_table_entry_wrapper *cfgtew)
{
        int found = 0;
        unsigned int proto;
        struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
        struct ipr_resource_entry *gscsi_res = NULL;

        res->needs_sync_complete = 0;
        res->in_erp = 0;
        res->add_to_ml = 0;
        res->del_from_ml = 0;
        res->resetting_device = 0;
        res->reset_occurred = 0;
        res->sdev = NULL;
        res->sata_port = NULL;

        if (ioa_cfg->sis64) {
                proto = cfgtew->u.cfgte64->proto;
                res->res_flags = cfgtew->u.cfgte64->res_flags;
                res->qmodel = IPR_QUEUEING_MODEL64(res);
                res->type = cfgtew->u.cfgte64->res_type;

                memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
                        sizeof(res->res_path));

                res->bus = 0;
                memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                        sizeof(res->dev_lun.scsi_lun));
                res->lun = scsilun_to_int(&res->dev_lun);

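                /* Generic SCSI devices share a target id across LUNs of
                   the same device: reuse an existing id when the dev_id
                   matches, otherwise allocate a new bit from target_ids */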
                if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
                        list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
                                if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
                                        found = 1;
                                        res->target = gscsi_res->target;
                                        break;
                                }
                        }
                        if (!found) {
                                res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                                  ioa_cfg->max_devs_supported);
                                set_bit(res->target, ioa_cfg->target_ids);
                        }
                } else if (res->type == IPR_RES_TYPE_IOAFP) {
                        res->bus = IPR_IOAFP_VIRTUAL_BUS;
                        res->target = 0;
                } else if (res->type == IPR_RES_TYPE_ARRAY) {
                        res->bus = IPR_ARRAY_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->array_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->array_ids);
                } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
                        res->bus = IPR_VSET_VIRTUAL_BUS;
                        res->target = find_first_zero_bit(ioa_cfg->vset_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->vset_ids);
                } else {
                        res->target = find_first_zero_bit(ioa_cfg->target_ids,
                                                          ioa_cfg->max_devs_supported);
                        set_bit(res->target, ioa_cfg->target_ids);
                }
        } else {
                proto = cfgtew->u.cfgte->proto;
                res->qmodel = IPR_QUEUEING_MODEL(res);
                res->flags = cfgtew->u.cfgte->flags;
                if (res->flags & IPR_IS_IOA_RESOURCE)
                        res->type = IPR_RES_TYPE_IOAFP;
                else
                        res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

                res->bus = cfgtew->u.cfgte->res_addr.bus;
                res->target = cfgtew->u.cfgte->res_addr.target;
                res->lun = cfgtew->u.cfgte->res_addr.lun;
                res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
        }

        ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:        resource entry struct
 * @cfgtew:     config table entry wrapper struct
 *
 * Return value:
 *      1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
                              struct ipr_config_table_entry_wrapper *cfgtew)
{
        if (res->ioa_cfg->sis64) {
                if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
                                        sizeof(cfgtew->u.cfgte64->dev_id)) &&
                        !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
                                        sizeof(cfgtew->u.cfgte64->lun))) {
                        return 1;
                }
        } else {
                if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
                    res->target == cfgtew->u.cfgte->res_addr.target &&
                    res->lun == cfgtew->u.cfgte->res_addr.lun)
                        return 1;
        }

        return 0;
}

/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:   resource path
 * @buffer:     buffer
 * @len:        length of buffer provided
 *
 * Return value:
 *      pointer to buffer
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
        int i;
        char *p = buffer;

        *p = '\0';
        p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
        for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
                p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

        return buffer;
}

1273 /**
1274  * ipr_format_res_path - Format the resource path for printing.
1275  * @ioa_cfg:    ioa config struct
1276  * @res_path:   resource path
1277  * @buf:        buffer
1278  * @len:        length of buffer provided
1279  *
1280  * Return value:
1281  *      pointer to buffer
1282  **/
1283 static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1284                                  u8 *res_path, char *buffer, int len)
1285 {
1286         char *p = buffer;
1287
1288         *p = '\0';
1289         p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1290         __ipr_format_res_path(res_path, p, len - (p - buffer));
1291         return buffer;
1292 }
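
/*
 * Continuing the sketch above (illustrative only): with host_no == 2
 * and the same path, ipr_format_res_path() yields "2/00-0E-01" --
 * the Scsi_Host number, a slash, then the dash-separated path.
 */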
1293
1294 /**
1295  * ipr_update_res_entry - Update the resource entry.
1296  * @res:        resource entry struct
1297  * @cfgtew:     config table entry wrapper struct
1298  *
1299  * Return value:
1300  *      none
1301  **/
1302 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1303                                  struct ipr_config_table_entry_wrapper *cfgtew)
1304 {
1305         char buffer[IPR_MAX_RES_PATH_LENGTH];
1306         unsigned int proto;
1307         int new_path = 0;
1308
1309         if (res->ioa_cfg->sis64) {
1310                 res->flags = cfgtew->u.cfgte64->flags;
1311                 res->res_flags = cfgtew->u.cfgte64->res_flags;
1312                 res->type = cfgtew->u.cfgte64->res_type;
1313
1314                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1315                         sizeof(struct ipr_std_inq_data));
1316
1317                 res->qmodel = IPR_QUEUEING_MODEL64(res);
1318                 proto = cfgtew->u.cfgte64->proto;
1319                 res->res_handle = cfgtew->u.cfgte64->res_handle;
1320                 res->dev_id = cfgtew->u.cfgte64->dev_id;
1321
1322                 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1323                         sizeof(res->dev_lun.scsi_lun));
1324
1325                 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1326                                         sizeof(res->res_path))) {
1327                         memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1328                                 sizeof(res->res_path));
1329                         new_path = 1;
1330                 }
1331
1332                 if (res->sdev && new_path)
1333                         sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1334                                     ipr_format_res_path(res->ioa_cfg,
1335                                         res->res_path, buffer, sizeof(buffer)));
1336         } else {
1337                 res->flags = cfgtew->u.cfgte->flags;
1338                 if (res->flags & IPR_IS_IOA_RESOURCE)
1339                         res->type = IPR_RES_TYPE_IOAFP;
1340                 else
1341                         res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1342
1343                 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1344                         sizeof(struct ipr_std_inq_data));
1345
1346                 res->qmodel = IPR_QUEUEING_MODEL(res);
1347                 proto = cfgtew->u.cfgte->proto;
1348                 res->res_handle = cfgtew->u.cfgte->res_handle;
1349         }
1350
1351         ipr_update_ata_class(res, proto);
1352 }
1353
1354 /**
1355  * ipr_clear_res_target - Clear the bit in the bit map representing the target
1356  *                        for the resource.
1357  * @res:        resource entry struct
1359  *
1360  * Return value:
1361  *      none
1362  **/
1363 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1364 {
1365         struct ipr_resource_entry *gscsi_res = NULL;
1366         struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1367
1368         if (!ioa_cfg->sis64)
1369                 return;
1370
1371         if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1372                 clear_bit(res->target, ioa_cfg->array_ids);
1373         else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1374                 clear_bit(res->target, ioa_cfg->vset_ids);
1375         else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1376                 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1377                         if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1378                                 return;
1379                 clear_bit(res->target, ioa_cfg->target_ids);
1380
1381         } else if (res->bus == 0)
1382                 clear_bit(res->target, ioa_cfg->target_ids);
1383 }
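
/*
 * Note on the generic SCSI case above: multiple resources (e.g.
 * several LUNs of one device) can share a dev_id and therefore a
 * target id, so the target bit is only cleared once no other entry
 * on the used list still carries the same dev_id.
 */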
1384
1385 /**
1386  * ipr_handle_config_change - Handle a config change from the adapter
1387  * @ioa_cfg:    ioa config struct
1388  * @hostrcb:    hostrcb
1389  *
1390  * Return value:
1391  *      none
1392  **/
1393 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1394                                      struct ipr_hostrcb *hostrcb)
1395 {
1396         struct ipr_resource_entry *res = NULL;
1397         struct ipr_config_table_entry_wrapper cfgtew;
1398         __be32 cc_res_handle;
1399
1400         u32 is_ndn = 1; /* assume a new device until found below */
1401
1402         if (ioa_cfg->sis64) {
1403                 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1404                 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1405         } else {
1406                 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1407                 cc_res_handle = cfgtew.u.cfgte->res_handle;
1408         }
1409
1410         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1411                 if (res->res_handle == cc_res_handle) {
1412                         is_ndn = 0;
1413                         break;
1414                 }
1415         }
1416
1417         if (is_ndn) {
1418                 if (list_empty(&ioa_cfg->free_res_q)) {
1419                         ipr_send_hcam(ioa_cfg,
1420                                       IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1421                                       hostrcb);
1422                         return;
1423                 }
1424
1425                 res = list_entry(ioa_cfg->free_res_q.next,
1426                                  struct ipr_resource_entry, queue);
1427
1428                 list_del(&res->queue);
1429                 ipr_init_res_entry(res, &cfgtew);
1430                 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1431         }
1432
1433         ipr_update_res_entry(res, &cfgtew);
1434
1435         if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1436                 if (res->sdev) {
1437                         res->del_from_ml = 1;
1438                         res->res_handle = IPR_INVALID_RES_HANDLE;
1439                         schedule_work(&ioa_cfg->work_q);
1440                 } else {
1441                         ipr_clear_res_target(res);
1442                         list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1443                 }
1444         } else if (!res->sdev || res->del_from_ml) {
1445                 res->add_to_ml = 1;
1446                 schedule_work(&ioa_cfg->work_q);
1447         }
1448
1449         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1450 }
1451
1452 /**
1453  * ipr_process_ccn - Op done function for a CCN.
1454  * @ipr_cmd:    ipr command struct
1455  *
1456  * This function is the op done function for a configuration
1457  * change notification host controlled async (HCAM) from the adapter.
1458  *
1459  * Return value:
1460  *      none
1461  **/
1462 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1463 {
1464         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1465         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1466         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1467
1468         list_del(&hostrcb->queue);
1469         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1470
1471         if (ioasc) {
1472                 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1473                     ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1474                         dev_err(&ioa_cfg->pdev->dev,
1475                                 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1476
1477                 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1478         } else {
1479                 ipr_handle_config_change(ioa_cfg, hostrcb);
1480         }
1481 }
1482
1483 /**
1484  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1485  * @i:          index into buffer
1486  * @buf:                string to modify
1487  *
1488  * This function will strip all trailing whitespace, pad the end
1489  * of the string with a single space, and NULL terminate the string.
1490  *
1491  * Return value:
1492  *      new length of string
1493  **/
1494 static int strip_and_pad_whitespace(int i, char *buf)
1495 {
1496         while (i && buf[i] == ' ')
1497                 i--;
1498         buf[i+1] = ' ';
1499         buf[i+2] = '\0';
1500         return i + 2;
1501 }
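
/*
 * Worked example (illustrative): for buf = "IBM     " and i = 7 (the
 * last byte of an 8-byte vendor id), the loop backs up to the 'M' at
 * index 2, writes a space at index 3 and a '\0' at index 4, and
 * returns 4 -- the offset at which the caller copies the next field,
 * overwriting the terminator.
 */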
1502
1503 /**
1504  * ipr_log_vpd_compact - Log the passed VPD compactly.
1505  * @prefix:             string to print at start of printk
1506  * @hostrcb:    hostrcb pointer
1507  * @vpd:                vendor/product id/sn struct
1508  *
1509  * Return value:
1510  *      none
1511  **/
1512 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1513                                 struct ipr_vpd *vpd)
1514 {
1515         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1516         int i = 0;
1517
1518         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1519         i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1520
1521         memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1522         i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1523
1524         memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1525         buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1526
1527         ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1528 }
1529
1530 /**
1531  * ipr_log_vpd - Log the passed VPD to the error log.
1532  * @vpd:                vendor/product id/sn struct
1533  *
1534  * Return value:
1535  *      none
1536  **/
1537 static void ipr_log_vpd(struct ipr_vpd *vpd)
1538 {
1539         char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1540                     + IPR_SERIAL_NUM_LEN];
1541
1542         memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1543         memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1544                IPR_PROD_ID_LEN);
1545         buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1546         ipr_err("Vendor/Product ID: %s\n", buffer);
1547
1548         memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1549         buffer[IPR_SERIAL_NUM_LEN] = '\0';
1550         ipr_err("    Serial Number: %s\n", buffer);
1551 }
1552
1553 /**
1554  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1555  * @prefix:             string to print at start of printk
1556  * @hostrcb:    hostrcb pointer
1557  * @vpd:                vendor/product id/sn/wwn struct
1558  *
1559  * Return value:
1560  *      none
1561  **/
1562 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563                                     struct ipr_ext_vpd *vpd)
1564 {
1565         ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1566         ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1567                      be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1568 }
1569
1570 /**
1571  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1572  * @vpd:                vendor/product id/sn/wwn struct
1573  *
1574  * Return value:
1575  *      none
1576  **/
1577 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1578 {
1579         ipr_log_vpd(&vpd->vpd);
1580         ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1581                 be32_to_cpu(vpd->wwid[1]));
1582 }
1583
1584 /**
1585  * ipr_log_enhanced_cache_error - Log a cache error.
1586  * @ioa_cfg:    ioa config struct
1587  * @hostrcb:    hostrcb struct
1588  *
1589  * Return value:
1590  *      none
1591  **/
1592 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1593                                          struct ipr_hostrcb *hostrcb)
1594 {
1595         struct ipr_hostrcb_type_12_error *error;
1596
1597         if (ioa_cfg->sis64)
1598                 error = &hostrcb->hcam.u.error64.u.type_12_error;
1599         else
1600                 error = &hostrcb->hcam.u.error.u.type_12_error;
1601
1602         ipr_err("-----Current Configuration-----\n");
1603         ipr_err("Cache Directory Card Information:\n");
1604         ipr_log_ext_vpd(&error->ioa_vpd);
1605         ipr_err("Adapter Card Information:\n");
1606         ipr_log_ext_vpd(&error->cfc_vpd);
1607
1608         ipr_err("-----Expected Configuration-----\n");
1609         ipr_err("Cache Directory Card Information:\n");
1610         ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1611         ipr_err("Adapter Card Information:\n");
1612         ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1613
1614         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1615                      be32_to_cpu(error->ioa_data[0]),
1616                      be32_to_cpu(error->ioa_data[1]),
1617                      be32_to_cpu(error->ioa_data[2]));
1618 }
1619
1620 /**
1621  * ipr_log_cache_error - Log a cache error.
1622  * @ioa_cfg:    ioa config struct
1623  * @hostrcb:    hostrcb struct
1624  *
1625  * Return value:
1626  *      none
1627  **/
1628 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1629                                 struct ipr_hostrcb *hostrcb)
1630 {
1631         struct ipr_hostrcb_type_02_error *error =
1632                 &hostrcb->hcam.u.error.u.type_02_error;
1633
1634         ipr_err("-----Current Configuration-----\n");
1635         ipr_err("Cache Directory Card Information:\n");
1636         ipr_log_vpd(&error->ioa_vpd);
1637         ipr_err("Adapter Card Information:\n");
1638         ipr_log_vpd(&error->cfc_vpd);
1639
1640         ipr_err("-----Expected Configuration-----\n");
1641         ipr_err("Cache Directory Card Information:\n");
1642         ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1643         ipr_err("Adapter Card Information:\n");
1644         ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1645
1646         ipr_err("Additional IOA Data: %08X %08X %08X\n",
1647                      be32_to_cpu(error->ioa_data[0]),
1648                      be32_to_cpu(error->ioa_data[1]),
1649                      be32_to_cpu(error->ioa_data[2]));
1650 }
1651
1652 /**
1653  * ipr_log_enhanced_config_error - Log a configuration error.
1654  * @ioa_cfg:    ioa config struct
1655  * @hostrcb:    hostrcb struct
1656  *
1657  * Return value:
1658  *      none
1659  **/
1660 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1661                                           struct ipr_hostrcb *hostrcb)
1662 {
1663         int errors_logged, i;
1664         struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1665         struct ipr_hostrcb_type_13_error *error;
1666
1667         error = &hostrcb->hcam.u.error.u.type_13_error;
1668         errors_logged = be32_to_cpu(error->errors_logged);
1669
1670         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1671                 be32_to_cpu(error->errors_detected), errors_logged);
1672
1673         dev_entry = error->dev;
1674
1675         for (i = 0; i < errors_logged; i++, dev_entry++) {
1676                 ipr_err_separator;
1677
1678                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1679                 ipr_log_ext_vpd(&dev_entry->vpd);
1680
1681                 ipr_err("-----New Device Information-----\n");
1682                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1683
1684                 ipr_err("Cache Directory Card Information:\n");
1685                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1686
1687                 ipr_err("Adapter Card Information:\n");
1688                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1689         }
1690 }
1691
1692 /**
1693  * ipr_log_sis64_config_error - Log a sis64 configuration error.
1694  * @ioa_cfg:    ioa config struct
1695  * @hostrcb:    hostrcb struct
1696  *
1697  * Return value:
1698  *      none
1699  **/
1700 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1701                                        struct ipr_hostrcb *hostrcb)
1702 {
1703         int errors_logged, i;
1704         struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1705         struct ipr_hostrcb_type_23_error *error;
1706         char buffer[IPR_MAX_RES_PATH_LENGTH];
1707
1708         error = &hostrcb->hcam.u.error64.u.type_23_error;
1709         errors_logged = be32_to_cpu(error->errors_logged);
1710
1711         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1712                 be32_to_cpu(error->errors_detected), errors_logged);
1713
1714         dev_entry = error->dev;
1715
1716         for (i = 0; i < errors_logged; i++, dev_entry++) {
1717                 ipr_err_separator;
1718
1719                 ipr_err("Device %d : %s", i + 1,
1720                         __ipr_format_res_path(dev_entry->res_path,
1721                                               buffer, sizeof(buffer)));
1722                 ipr_log_ext_vpd(&dev_entry->vpd);
1723
1724                 ipr_err("-----New Device Information-----\n");
1725                 ipr_log_ext_vpd(&dev_entry->new_vpd);
1726
1727                 ipr_err("Cache Directory Card Information:\n");
1728                 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1729
1730                 ipr_err("Adapter Card Information:\n");
1731                 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1732         }
1733 }
1734
1735 /**
1736  * ipr_log_config_error - Log a configuration error.
1737  * @ioa_cfg:    ioa config struct
1738  * @hostrcb:    hostrcb struct
1739  *
1740  * Return value:
1741  *      none
1742  **/
1743 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1744                                  struct ipr_hostrcb *hostrcb)
1745 {
1746         int errors_logged, i;
1747         struct ipr_hostrcb_device_data_entry *dev_entry;
1748         struct ipr_hostrcb_type_03_error *error;
1749
1750         error = &hostrcb->hcam.u.error.u.type_03_error;
1751         errors_logged = be32_to_cpu(error->errors_logged);
1752
1753         ipr_err("Device Errors Detected/Logged: %d/%d\n",
1754                 be32_to_cpu(error->errors_detected), errors_logged);
1755
1756         dev_entry = error->dev;
1757
1758         for (i = 0; i < errors_logged; i++, dev_entry++) {
1759                 ipr_err_separator;
1760
1761                 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1762                 ipr_log_vpd(&dev_entry->vpd);
1763
1764                 ipr_err("-----New Device Information-----\n");
1765                 ipr_log_vpd(&dev_entry->new_vpd);
1766
1767                 ipr_err("Cache Directory Card Information:\n");
1768                 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1769
1770                 ipr_err("Adapter Card Information:\n");
1771                 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1772
1773                 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1774                         be32_to_cpu(dev_entry->ioa_data[0]),
1775                         be32_to_cpu(dev_entry->ioa_data[1]),
1776                         be32_to_cpu(dev_entry->ioa_data[2]),
1777                         be32_to_cpu(dev_entry->ioa_data[3]),
1778                         be32_to_cpu(dev_entry->ioa_data[4]));
1779         }
1780 }
1781
1782 /**
1783  * ipr_log_enhanced_array_error - Log an array configuration error.
1784  * @ioa_cfg:    ioa config struct
1785  * @hostrcb:    hostrcb struct
1786  *
1787  * Return value:
1788  *      none
1789  **/
1790 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1791                                          struct ipr_hostrcb *hostrcb)
1792 {
1793         int i, num_entries;
1794         struct ipr_hostrcb_type_14_error *error;
1795         struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1796         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1797
1798         error = &hostrcb->hcam.u.error.u.type_14_error;
1799
1800         ipr_err_separator;
1801
1802         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1803                 error->protection_level,
1804                 ioa_cfg->host->host_no,
1805                 error->last_func_vset_res_addr.bus,
1806                 error->last_func_vset_res_addr.target,
1807                 error->last_func_vset_res_addr.lun);
1808
1809         ipr_err_separator;
1810
1811         array_entry = error->array_member;
1812         num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1813                             ARRAY_SIZE(error->array_member));
1814
1815         for (i = 0; i < num_entries; i++, array_entry++) {
1816                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1817                         continue;
1818
1819                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1820                         ipr_err("Exposed Array Member %d:\n", i);
1821                 else
1822                         ipr_err("Array Member %d:\n", i);
1823
1824                 ipr_log_ext_vpd(&array_entry->vpd);
1825                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1826                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1827                                  "Expected Location");
1828
1829                 ipr_err_separator;
1830         }
1831 }
1832
1833 /**
1834  * ipr_log_array_error - Log an array configuration error.
1835  * @ioa_cfg:    ioa config struct
1836  * @hostrcb:    hostrcb struct
1837  *
1838  * Return value:
1839  *      none
1840  **/
1841 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842                                 struct ipr_hostrcb *hostrcb)
1843 {
1844         int i;
1845         struct ipr_hostrcb_type_04_error *error;
1846         struct ipr_hostrcb_array_data_entry *array_entry;
1847         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849         error = &hostrcb->hcam.u.error.u.type_04_error;
1850
1851         ipr_err_separator;
1852
1853         ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854                 error->protection_level,
1855                 ioa_cfg->host->host_no,
1856                 error->last_func_vset_res_addr.bus,
1857                 error->last_func_vset_res_addr.target,
1858                 error->last_func_vset_res_addr.lun);
1859
1860         ipr_err_separator;
1861
1862         array_entry = error->array_member;
1863
1864         for (i = 0; i < 18; i++) {      /* 10 in array_member + 8 in array_member2 */
1865                 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866                         continue;
1867
1868                 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869                         ipr_err("Exposed Array Member %d:\n", i);
1870                 else
1871                         ipr_err("Array Member %d:\n", i);
1872
1873                 ipr_log_vpd(&array_entry->vpd);
1874
1875                 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876                 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877                                  "Expected Location");
1878
1879                 ipr_err_separator;
1880
1881                 if (i == 9)
1882                         array_entry = error->array_member2;
1883                 else
1884                         array_entry++;
1885         }
1886 }
1887
1888 /**
1889  * ipr_log_hex_data - Log additional hex IOA error data.
1890  * @ioa_cfg:    ioa config struct
1891  * @data:               IOA error data
1892  * @len:                data length in bytes
1893  *
1894  * Return value:
1895  *      none
1896  **/
1897 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1898 {
1899         int i;
1900
1901         if (len == 0)
1902                 return;
1903
1904         if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1905                 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1906
1907         for (i = 0; i < len / 4; i += 4) {
1908                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1909                         be32_to_cpu(data[i]),
1910                         be32_to_cpu(data[i+1]),
1911                         be32_to_cpu(data[i+2]),
1912                         be32_to_cpu(data[i+3]));
1913         }
1914 }
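
/*
 * For illustration, the dump prints sixteen bytes per line as four
 * big-endian words behind their byte offset (i counts u32 words in
 * steps of four, so offsets advance by 0x10):
 *
 *	00000000: 04040000 00000001 00000002 00000003
 *	00000010: ...
 */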
1915
1916 /**
1917  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1918  * @ioa_cfg:    ioa config struct
1919  * @hostrcb:    hostrcb struct
1920  *
1921  * Return value:
1922  *      none
1923  **/
1924 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1925                                             struct ipr_hostrcb *hostrcb)
1926 {
1927         struct ipr_hostrcb_type_17_error *error;
1928
1929         if (ioa_cfg->sis64)
1930                 error = &hostrcb->hcam.u.error64.u.type_17_error;
1931         else
1932                 error = &hostrcb->hcam.u.error.u.type_17_error;
1933
1934         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1935         strim(error->failure_reason);
1936
1937         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1938                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1939         ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1940         ipr_log_hex_data(ioa_cfg, error->data,
1941                          be32_to_cpu(hostrcb->hcam.length) -
1942                          (offsetof(struct ipr_hostrcb_error, u) +
1943                           offsetof(struct ipr_hostrcb_type_17_error, data)));
1944 }
1945
1946 /**
1947  * ipr_log_dual_ioa_error - Log a dual adapter error.
1948  * @ioa_cfg:    ioa config struct
1949  * @hostrcb:    hostrcb struct
1950  *
1951  * Return value:
1952  *      none
1953  **/
1954 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1955                                    struct ipr_hostrcb *hostrcb)
1956 {
1957         struct ipr_hostrcb_type_07_error *error;
1958
1959         error = &hostrcb->hcam.u.error.u.type_07_error;
1960         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1961         strim(error->failure_reason);
1962
1963         ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1964                      be32_to_cpu(hostrcb->hcam.u.error.prc));
1965         ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1966         ipr_log_hex_data(ioa_cfg, error->data,
1967                          be32_to_cpu(hostrcb->hcam.length) -
1968                          (offsetof(struct ipr_hostrcb_error, u) +
1969                           offsetof(struct ipr_hostrcb_type_07_error, data)));
1970 }
1971
1972 static const struct {
1973         u8 active;
1974         char *desc;
1975 } path_active_desc[] = {
1976         { IPR_PATH_NO_INFO, "Path" },
1977         { IPR_PATH_ACTIVE, "Active path" },
1978         { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1979 };
1980
1981 static const struct {
1982         u8 state;
1983         char *desc;
1984 } path_state_desc[] = {
1985         { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1986         { IPR_PATH_HEALTHY, "is healthy" },
1987         { IPR_PATH_DEGRADED, "is degraded" },
1988         { IPR_PATH_FAILED, "is failed" }
1989 };
1990
1991 /**
1992  * ipr_log_fabric_path - Log a fabric path error
1993  * @hostrcb:    hostrcb struct
1994  * @fabric:             fabric descriptor
1995  *
1996  * Return value:
1997  *      none
1998  **/
1999 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2000                                 struct ipr_hostrcb_fabric_desc *fabric)
2001 {
2002         int i, j;
2003         u8 path_state = fabric->path_state;
2004         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2005         u8 state = path_state & IPR_PATH_STATE_MASK;
2006
2007         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2008                 if (path_active_desc[i].active != active)
2009                         continue;
2010
2011                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2012                         if (path_state_desc[j].state != state)
2013                                 continue;
2014
2015                         if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2016                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2017                                              path_active_desc[i].desc, path_state_desc[j].desc,
2018                                              fabric->ioa_port);
2019                         } else if (fabric->cascaded_expander == 0xff) {
2020                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2021                                              path_active_desc[i].desc, path_state_desc[j].desc,
2022                                              fabric->ioa_port, fabric->phy);
2023                         } else if (fabric->phy == 0xff) {
2024                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2025                                              path_active_desc[i].desc, path_state_desc[j].desc,
2026                                              fabric->ioa_port, fabric->cascaded_expander);
2027                         } else {
2028                                 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2029                                              path_active_desc[i].desc, path_state_desc[j].desc,
2030                                              fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2031                         }
2032                         return;
2033                 }
2034         }
2035
2036         ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2037                 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2038 }
2039
2040 /**
2041  * ipr_log64_fabric_path - Log a fabric path error
2042  * @hostrcb:    hostrcb struct
2043  * @fabric:             fabric descriptor
2044  *
2045  * Return value:
2046  *      none
2047  **/
2048 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2049                                   struct ipr_hostrcb64_fabric_desc *fabric)
2050 {
2051         int i, j;
2052         u8 path_state = fabric->path_state;
2053         u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054         u8 state = path_state & IPR_PATH_STATE_MASK;
2055         char buffer[IPR_MAX_RES_PATH_LENGTH];
2056
2057         for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058                 if (path_active_desc[i].active != active)
2059                         continue;
2060
2061                 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062                         if (path_state_desc[j].state != state)
2063                                 continue;
2064
2065                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2066                                      path_active_desc[i].desc, path_state_desc[j].desc,
2067                                      ipr_format_res_path(hostrcb->ioa_cfg,
2068                                                 fabric->res_path,
2069                                                 buffer, sizeof(buffer)));
2070                         return;
2071                 }
2072         }
2073
2074         ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2075                 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2076                                     buffer, sizeof(buffer)));
2077 }
2078
2079 static const struct {
2080         u8 type;
2081         char *desc;
2082 } path_type_desc[] = {
2083         { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2084         { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2085         { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2086         { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2087 };
2088
2089 static const struct {
2090         u8 status;
2091         char *desc;
2092 } path_status_desc[] = {
2093         { IPR_PATH_CFG_NO_PROB, "Functional" },
2094         { IPR_PATH_CFG_DEGRADED, "Degraded" },
2095         { IPR_PATH_CFG_FAILED, "Failed" },
2096         { IPR_PATH_CFG_SUSPECT, "Suspect" },
2097         { IPR_PATH_NOT_DETECTED, "Missing" },
2098         { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2099 };
2100
2101 static const char *link_rate[] = {
2102         "unknown",
2103         "disabled",
2104         "phy reset problem",
2105         "spinup hold",
2106         "port selector",
2107         "unknown",
2108         "unknown",
2109         "unknown",
2110         "1.5Gbps",
2111         "3.0Gbps",
2112         "unknown",
2113         "unknown",
2114         "unknown",
2115         "unknown",
2116         "unknown",
2117         "unknown"
2118 };
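
/*
 * The table above is indexed by the low four bits of a config
 * element's link_rate field (cfg->link_rate & IPR_PHY_LINK_RATE_MASK):
 * 0x1-0x4 encode the SAS phy conditions listed, 0x8 and 0x9 the
 * negotiated 1.5 and 3.0 Gbps rates, and everything else is reported
 * as "unknown".
 */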
2119
2120 /**
2121  * ipr_log_path_elem - Log a fabric path element.
2122  * @hostrcb:    hostrcb struct
2123  * @cfg:                fabric path element struct
2124  *
2125  * Return value:
2126  *      none
2127  **/
2128 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2129                               struct ipr_hostrcb_config_element *cfg)
2130 {
2131         int i, j;
2132         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2133         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2134
2135         if (type == IPR_PATH_CFG_NOT_EXIST)
2136                 return;
2137
2138         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2139                 if (path_type_desc[i].type != type)
2140                         continue;
2141
2142                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2143                         if (path_status_desc[j].status != status)
2144                                 continue;
2145
2146                         if (type == IPR_PATH_CFG_IOA_PORT) {
2147                                 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2148                                              path_status_desc[j].desc, path_type_desc[i].desc,
2149                                              cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2150                                              be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2151                         } else {
2152                                 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2153                                         ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2154                                                      path_status_desc[j].desc, path_type_desc[i].desc,
2155                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2156                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2157                                 } else if (cfg->cascaded_expander == 0xff) {
2158                                         ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2159                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2160                                                      path_type_desc[i].desc, cfg->phy,
2161                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2162                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2163                                 } else if (cfg->phy == 0xff) {
2164                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2165                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2166                                                      path_type_desc[i].desc, cfg->cascaded_expander,
2167                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2168                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2169                                 } else {
2170                                         ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2171                                                      "WWN=%08X%08X\n", path_status_desc[j].desc,
2172                                                      path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2173                                                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2174                                                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2175                                 }
2176                         }
2177                         return;
2178                 }
2179         }
2180
2181         ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2182                      "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2183                      link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2184                      be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2185 }
2186
2187 /**
2188  * ipr_log64_path_elem - Log a fabric path element.
2189  * @hostrcb:    hostrcb struct
2190  * @cfg:                fabric path element struct
2191  *
2192  * Return value:
2193  *      none
2194  **/
2195 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2196                                 struct ipr_hostrcb64_config_element *cfg)
2197 {
2198         int i, j;
2199         u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2200         u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2201         u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2202         char buffer[IPR_MAX_RES_PATH_LENGTH];
2203
2204         if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2205                 return;
2206
2207         for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2208                 if (path_type_desc[i].type != type)
2209                         continue;
2210
2211                 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2212                         if (path_status_desc[j].status != status)
2213                                 continue;
2214
2215                         ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2216                                      path_status_desc[j].desc, path_type_desc[i].desc,
2217                                      ipr_format_res_path(hostrcb->ioa_cfg,
2218                                         cfg->res_path, buffer, sizeof(buffer)),
2219                                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2220                                         be32_to_cpu(cfg->wwid[0]),
2221                                         be32_to_cpu(cfg->wwid[1]));
2222                         return;
2223                 }
2224         }
2225         ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2226                      "WWN=%08X%08X\n", cfg->type_status,
2227                      ipr_format_res_path(hostrcb->ioa_cfg,
2228                         cfg->res_path, buffer, sizeof(buffer)),
2229                         link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2230                         be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2231 }
2232
2233 /**
2234  * ipr_log_fabric_error - Log a fabric error.
2235  * @ioa_cfg:    ioa config struct
2236  * @hostrcb:    hostrcb struct
2237  *
2238  * Return value:
2239  *      none
2240  **/
2241 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2242                                  struct ipr_hostrcb *hostrcb)
2243 {
2244         struct ipr_hostrcb_type_20_error *error;
2245         struct ipr_hostrcb_fabric_desc *fabric;
2246         struct ipr_hostrcb_config_element *cfg;
2247         int i, add_len;
2248
2249         error = &hostrcb->hcam.u.error.u.type_20_error;
2250         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2251         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2252
2253         add_len = be32_to_cpu(hostrcb->hcam.length) -
2254                 (offsetof(struct ipr_hostrcb_error, u) +
2255                  offsetof(struct ipr_hostrcb_type_20_error, desc));
2256
2257         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2258                 ipr_log_fabric_path(hostrcb, fabric);
2259                 for_each_fabric_cfg(fabric, cfg)
2260                         ipr_log_path_elem(hostrcb, cfg);
2261
2262                 add_len -= be16_to_cpu(fabric->length);
2263                 fabric = (struct ipr_hostrcb_fabric_desc *)
2264                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2265         }
2266
2267         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2268 }
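
/*
 * Each fabric descriptor above is variable length: fabric->length
 * covers the descriptor plus its path elements, so the walk advances
 * by that many bytes per entry. Whatever trails the last descriptor
 * in the hostrcb (add_len bytes) is dumped raw as additional data.
 */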
2269
2270 /**
2271  * ipr_log_sis64_array_error - Log a sis64 array error.
2272  * @ioa_cfg:    ioa config struct
2273  * @hostrcb:    hostrcb struct
2274  *
2275  * Return value:
2276  *      none
2277  **/
2278 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2279                                       struct ipr_hostrcb *hostrcb)
2280 {
2281         int i, num_entries;
2282         struct ipr_hostrcb_type_24_error *error;
2283         struct ipr_hostrcb64_array_data_entry *array_entry;
2284         char buffer[IPR_MAX_RES_PATH_LENGTH];
2285         const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2286
2287         error = &hostrcb->hcam.u.error64.u.type_24_error;
2288
2289         ipr_err_separator;
2290
2291         ipr_err("RAID %s Array Configuration: %s\n",
2292                 error->protection_level,
2293                 ipr_format_res_path(ioa_cfg, error->last_res_path,
2294                         buffer, sizeof(buffer)));
2295
2296         ipr_err_separator;
2297
2298         array_entry = error->array_member;
2299         num_entries = min_t(u32, error->num_entries,
2300                             ARRAY_SIZE(error->array_member));
2301
2302         for (i = 0; i < num_entries; i++, array_entry++) {
2303
2304                 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2305                         continue;
2306
2307                 if (error->exposed_mode_adn == i)
2308                         ipr_err("Exposed Array Member %d:\n", i);
2309                 else
2310                         ipr_err("Array Member %d:\n", i);
2311
2313                 ipr_log_ext_vpd(&array_entry->vpd);
2314                 ipr_err("Current Location: %s\n",
2315                          ipr_format_res_path(ioa_cfg, array_entry->res_path,
2316                                 buffer, sizeof(buffer)));
2317                 ipr_err("Expected Location: %s\n",
2318                          ipr_format_res_path(ioa_cfg,
2319                                 array_entry->expected_res_path,
2320                                 buffer, sizeof(buffer)));
2321
2322                 ipr_err_separator;
2323         }
2324 }
2325
2326 /**
2327  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2328  * @ioa_cfg:    ioa config struct
2329  * @hostrcb:    hostrcb struct
2330  *
2331  * Return value:
2332  *      none
2333  **/
2334 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2335                                        struct ipr_hostrcb *hostrcb)
2336 {
2337         struct ipr_hostrcb_type_30_error *error;
2338         struct ipr_hostrcb64_fabric_desc *fabric;
2339         struct ipr_hostrcb64_config_element *cfg;
2340         int i, add_len;
2341
2342         error = &hostrcb->hcam.u.error64.u.type_30_error;
2343
2344         error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2345         ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2346
2347         add_len = be32_to_cpu(hostrcb->hcam.length) -
2348                 (offsetof(struct ipr_hostrcb64_error, u) +
2349                  offsetof(struct ipr_hostrcb_type_30_error, desc));
2350
2351         for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2352                 ipr_log64_fabric_path(hostrcb, fabric);
2353                 for_each_fabric_cfg(fabric, cfg)
2354                         ipr_log64_path_elem(hostrcb, cfg);
2355
2356                 add_len -= be16_to_cpu(fabric->length);
2357                 fabric = (struct ipr_hostrcb64_fabric_desc *)
2358                         ((unsigned long)fabric + be16_to_cpu(fabric->length));
2359         }
2360
2361         ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2362 }
2363
2364 /**
2365  * ipr_log_generic_error - Log an adapter error.
2366  * @ioa_cfg:    ioa config struct
2367  * @hostrcb:    hostrcb struct
2368  *
2369  * Return value:
2370  *      none
2371  **/
2372 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2373                                   struct ipr_hostrcb *hostrcb)
2374 {
2375         ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2376                          be32_to_cpu(hostrcb->hcam.length));
2377 }
2378
2379 /**
2380  * ipr_log_sis64_device_error - Log a sis64 device error.
2381  * @ioa_cfg:    ioa config struct
2382  * @hostrcb:    hostrcb struct
2383  *
2384  * Return value:
2385  *      none
2386  **/
2387 static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2388                                          struct ipr_hostrcb *hostrcb)
2389 {
2390         struct ipr_hostrcb_type_21_error *error;
2391         char buffer[IPR_MAX_RES_PATH_LENGTH];
2392
2393         error = &hostrcb->hcam.u.error64.u.type_21_error;
2394
2395         ipr_err("-----Failing Device Information-----\n");
2396         ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2397                 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2398                  be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2399         ipr_err("Device Resource Path: %s\n",
2400                 __ipr_format_res_path(error->res_path,
2401                                       buffer, sizeof(buffer)));
2402         error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2403         error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2404         ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2405         ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2406         ipr_err("SCSI Sense Data:\n");
2407         ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2408         ipr_err("SCSI Command Descriptor Block:\n");
2409         ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2410
2411         ipr_err("Additional IOA Data:\n");
2412         ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2413 }
2414
2415 /**
2416  * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2417  * @ioasc:      IOASC
2418  *
2419  * This function will return the index into the ipr_error_table
2420  * for the specified IOASC. If the IOASC is not in the table,
2421  * 0 will be returned, which points to the entry used for unknown errors.
2422  *
2423  * Return value:
2424  *      index into the ipr_error_table
2425  **/
2426 static u32 ipr_get_error(u32 ioasc)
2427 {
2428         int i;
2429
2430         for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2431                 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2432                         return i;
2433
2434         return 0;
2435 }
2436
2437 /**
2438  * ipr_handle_log_data - Log an adapter error.
2439  * @ioa_cfg:    ioa config struct
2440  * @hostrcb:    hostrcb struct
2441  *
2442  * This function logs an adapter error to the system.
2443  *
2444  * Return value:
2445  *      none
2446  **/
2447 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2448                                 struct ipr_hostrcb *hostrcb)
2449 {
2450         u32 ioasc;
2451         int error_index;
2452         struct ipr_hostrcb_type_21_error *error;
2453
2454         if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2455                 return;
2456
2457         if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2458                 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2459
2460         if (ioa_cfg->sis64)
2461                 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2462         else
2463                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2464
2465         if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2466             ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2467                 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2468                 scsi_report_bus_reset(ioa_cfg->host,
2469                                       hostrcb->hcam.u.error.fd_res_addr.bus);
2470         }
2471
2472         error_index = ipr_get_error(ioasc);
2473
2474         if (!ipr_error_table[error_index].log_hcam)
2475                 return;
2476
2477         if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2478             hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2479                 error = &hostrcb->hcam.u.error64.u.type_21_error;
2480
2481                 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2482                         ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2483                                 return;
2484         }
2485
2486         ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2487
2488         /* Set indication we have logged an error */
2489         ioa_cfg->errors_logged++;
2490
2491         if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2492                 return;
2493         if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2494                 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2495
2496         switch (hostrcb->hcam.overlay_id) {
2497         case IPR_HOST_RCB_OVERLAY_ID_2:
2498                 ipr_log_cache_error(ioa_cfg, hostrcb);
2499                 break;
2500         case IPR_HOST_RCB_OVERLAY_ID_3:
2501                 ipr_log_config_error(ioa_cfg, hostrcb);
2502                 break;
2503         case IPR_HOST_RCB_OVERLAY_ID_4:
2504         case IPR_HOST_RCB_OVERLAY_ID_6:
2505                 ipr_log_array_error(ioa_cfg, hostrcb);
2506                 break;
2507         case IPR_HOST_RCB_OVERLAY_ID_7:
2508                 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2509                 break;
2510         case IPR_HOST_RCB_OVERLAY_ID_12:
2511                 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2512                 break;
2513         case IPR_HOST_RCB_OVERLAY_ID_13:
2514                 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2515                 break;
2516         case IPR_HOST_RCB_OVERLAY_ID_14:
2517         case IPR_HOST_RCB_OVERLAY_ID_16:
2518                 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2519                 break;
2520         case IPR_HOST_RCB_OVERLAY_ID_17:
2521                 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2522                 break;
2523         case IPR_HOST_RCB_OVERLAY_ID_20:
2524                 ipr_log_fabric_error(ioa_cfg, hostrcb);
2525                 break;
2526         case IPR_HOST_RCB_OVERLAY_ID_21:
2527                 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2528                 break;
2529         case IPR_HOST_RCB_OVERLAY_ID_23:
2530                 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2531                 break;
2532         case IPR_HOST_RCB_OVERLAY_ID_24:
2533         case IPR_HOST_RCB_OVERLAY_ID_26:
2534                 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2535                 break;
2536         case IPR_HOST_RCB_OVERLAY_ID_30:
2537                 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2538                 break;
2539         case IPR_HOST_RCB_OVERLAY_ID_1:
2540         case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2541         default:
2542                 ipr_log_generic_error(ioa_cfg, hostrcb);
2543                 break;
2544         }
2545 }
2546
2547 /**
2548  * ipr_process_error - Op done function for an adapter error log.
2549  * @ipr_cmd:    ipr command struct
2550  *
2551  * This function is the op done function for an error log host
2552  * controlled async (HCAM) from the adapter. It will log the error and
2553  * send the HCAM back to the adapter.
2554  *
2555  * Return value:
2556  *      none
2557  **/
2558 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2559 {
2560         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2561         struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2562         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2563         u32 fd_ioasc;
2564
2565         if (ioa_cfg->sis64)
2566                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2567         else
2568                 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2569
2570         list_del(&hostrcb->queue);
2571         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2572
2573         if (!ioasc) {
2574                 ipr_handle_log_data(ioa_cfg, hostrcb);
2575                 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2576                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2577         } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2578                    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2579                 dev_err(&ioa_cfg->pdev->dev,
2580                         "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2581         }
2582
2583         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2584 }
2585
2586 /**
2587  * ipr_timeout -  An internally generated op has timed out.
2588  * @ipr_cmd:    ipr command struct
2589  *
2590  * This function blocks host requests and initiates an
2591  * adapter reset.
2592  *
2593  * Return value:
2594  *      none
2595  **/
2596 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2597 {
2598         unsigned long lock_flags = 0;
2599         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2600
2601         ENTER;
2602         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2603
2604         ioa_cfg->errors_logged++;
2605         dev_err(&ioa_cfg->pdev->dev,
2606                 "Adapter being reset due to command timeout.\n");
2607
2608         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2609                 ioa_cfg->sdt_state = GET_DUMP;
2610
2611         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2612                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2613
2614         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2615         LEAVE;
2616 }
2617
2618 /**
2619  * ipr_oper_timeout -  Adapter timed out transitioning to operational
2620  * @ipr_cmd:    ipr command struct
2621  *
2622  * This function blocks host requests and initiates an
2623  * adapter reset.
2624  *
2625  * Return value:
2626  *      none
2627  **/
2628 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2629 {
2630         unsigned long lock_flags = 0;
2631         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2632
2633         ENTER;
2634         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2635
2636         ioa_cfg->errors_logged++;
2637         dev_err(&ioa_cfg->pdev->dev,
2638                 "Adapter timed out transitioning to operational.\n");
2639
2640         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2641                 ioa_cfg->sdt_state = GET_DUMP;
2642
2643         if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2644                 if (ipr_fastfail)
2645                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2646                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2647         }
2648
2649         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2650         LEAVE;
2651 }
2652
2653 /**
2654  * ipr_find_ses_entry - Find matching SES in SES table
2655  * @res:        resource entry struct of SES
2656  *
2657  * Return value:
2658  *      pointer to SES table entry / NULL on failure
2659  **/
2660 static const struct ipr_ses_table_entry *
2661 ipr_find_ses_entry(struct ipr_resource_entry *res)
2662 {
2663         int i, j, matches;
2664         struct ipr_std_inq_vpids *vpids;
2665         const struct ipr_ses_table_entry *ste = ipr_ses_table;
2666
2667         for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2668                 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2669                         if (ste->compare_product_id_byte[j] == 'X') {
2670                                 vpids = &res->std_inq_data.vpids;
2671                                 if (vpids->product_id[j] == ste->product_id[j])
2672                                         matches++;
2673                                 else
2674                                         break;
2675                         } else
2676                                 matches++;
2677                 }
2678
2679                 if (matches == IPR_PROD_ID_LEN)
2680                         return ste;
2681         }
2682
2683         return NULL;
2684 }
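
/*
 * Illustrative, hypothetical entry (not present in ipr_ses_table): in the
 * matching loop above, an 'X' in compare_product_id_byte marks a byte that
 * MUST equal product_id[]; any other character makes that byte a don't-care.
 * Field names are taken from the uses above; array sizes are assumed from
 * IPR_PROD_ID_LEN.  The entry below would match any product ID beginning
 * with "HSBPD4M".
 */
static const struct ipr_ses_table_entry ipr_ses_example __maybe_unused = {
	.product_id		 = "HSBPD4M  PU3SCSI",
	.compare_product_id_byte = "XXXXXXX         ",
	.max_bus_speed_limit	 = 80,
};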
2685
2686 /**
2687  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2688  * @ioa_cfg:    ioa config struct
2689  * @bus:                SCSI bus
2690  * @bus_width:  bus width
2691  *
2692  * Return value:
2693  *      SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2694  *      For a 2-byte wide SCSI bus, the maximum data rate in MB/sec is
2695  *      twice the bus clock rate in MHz (e.g. a wide-enabled bus clocked
2696  *      at 160 MHz transfers at most 320 MB/sec).
2697  **/
2698 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2699 {
2700         struct ipr_resource_entry *res;
2701         const struct ipr_ses_table_entry *ste;
2702         u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2703
2704         /* Loop through each config table entry in the config table buffer */
2705         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2706                 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2707                         continue;
2708
2709                 if (bus != res->bus)
2710                         continue;
2711
2712                 if (!(ste = ipr_find_ses_entry(res)))
2713                         continue;
2714
2715                 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2716         }
2717
2718         return max_xfer_rate;
2719 }
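
/*
 * Worked example, assuming max_bus_speed_limit is expressed in MB/sec: an
 * SES entry of 80 on a 16-bit (2-byte) wide bus yields
 * (80 * 10) / (16 / 8) = 400, i.e. a 40 MHz bus clock in the 100KHz units
 * this driver reports.
 */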
2720
2721 /**
2722  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2723  * @ioa_cfg:            ioa config struct
2724  * @max_delay:          max delay in micro-seconds to wait
2725  *
2726  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2727  *
2728  * Return value:
2729  *      0 on success / other on failure
2730  **/
2731 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2732 {
2733         volatile u32 pcii_reg;
2734         int delay = 1;
2735
2736         /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2737         while (delay < max_delay) {
2738                 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2739
2740                 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2741                         return 0;
2742
2743                 /* udelay cannot be used if delay is more than a few milliseconds */
2744                 if ((delay / 1000) > MAX_UDELAY_MS)
2745                         mdelay(delay / 1000);
2746                 else
2747                         udelay(delay);
2748
2749                 delay += delay;
2750         }
2751         return -EIO;
2752 }
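
/*
 * Note: the delay doubles on every pass (1, 2, 4, ... microseconds), so the
 * loop above performs O(log2(max_delay)) register reads and the total time
 * spent waiting is bounded by 1 + 2 + 4 + ... < 2 * max_delay.
 */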
2753
2754 /**
2755  * ipr_get_sis64_dump_data_section - Dump IOA memory
2756  * @ioa_cfg:                    ioa config struct
2757  * @start_addr:                 adapter address to dump
2758  * @dest:                       destination kernel buffer
2759  * @length_in_words:            length to dump in 4 byte words
2760  *
2761  * Return value:
2762  *      0 on success
2763  **/
2764 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2765                                            u32 start_addr,
2766                                            __be32 *dest, u32 length_in_words)
2767 {
2768         int i;
2769
2770         for (i = 0; i < length_in_words; i++) {
2771                 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2772                 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2773                 dest++;
2774         }
2775
2776         return 0;
2777 }
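
/*
 * Usage sketch (illustrative only; the adapter address and length below are
 * made up): fetch sixteen 32-bit words starting at adapter address 0x1000
 * through the indirect dump_addr_reg/dump_data_reg window.
 */
static int __maybe_unused ipr_sis64_dump_example(struct ipr_ioa_cfg *ioa_cfg)
{
	__be32 buf[16];

	return ipr_get_sis64_dump_data_section(ioa_cfg, 0x1000, buf,
					       ARRAY_SIZE(buf));
}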
2778
2779 /**
2780  * ipr_get_ldump_data_section - Dump IOA memory
2781  * @ioa_cfg:                    ioa config struct
2782  * @start_addr:                 adapter address to dump
2783  * @dest:                       destination kernel buffer
2784  * @length_in_words:            length to dump in 4 byte words
2785  *
2786  * Return value:
2787  *      0 on success / -EIO on failure
2788  **/
2789 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2790                                       u32 start_addr,
2791                                       __be32 *dest, u32 length_in_words)
2792 {
2793         volatile u32 temp_pcii_reg;
2794         int i, delay = 0;
2795
2796         if (ioa_cfg->sis64)
2797                 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2798                                                        dest, length_in_words);
2799
2800         /* Write IOA interrupt reg starting LDUMP state  */
2801         writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2802                ioa_cfg->regs.set_uproc_interrupt_reg32);
2803
2804         /* Wait for IO debug acknowledge */
2805         if (ipr_wait_iodbg_ack(ioa_cfg,
2806                                IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2807                 dev_err(&ioa_cfg->pdev->dev,
2808                         "IOA dump long data transfer timeout\n");
2809                 return -EIO;
2810         }
2811
2812         /* Signal LDUMP interlocked - clear IO debug ack */
2813         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2814                ioa_cfg->regs.clr_interrupt_reg);
2815
2816         /* Write Mailbox with starting address */
2817         writel(start_addr, ioa_cfg->ioa_mailbox);
2818
2819         /* Signal address valid - clear IOA Reset alert */
2820         writel(IPR_UPROCI_RESET_ALERT,
2821                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2822
2823         for (i = 0; i < length_in_words; i++) {
2824                 /* Wait for IO debug acknowledge */
2825                 if (ipr_wait_iodbg_ack(ioa_cfg,
2826                                        IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2827                         dev_err(&ioa_cfg->pdev->dev,
2828                                 "IOA dump short data transfer timeout\n");
2829                         return -EIO;
2830                 }
2831
2832                 /* Read data from mailbox and increment destination pointer */
2833                 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2834                 dest++;
2835
2836                 /* For all but the last word of data, signal data received */
2837                 if (i < (length_in_words - 1)) {
2838                         /* Signal dump data received - Clear IO debug Ack */
2839                         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2840                                ioa_cfg->regs.clr_interrupt_reg);
2841                 }
2842         }
2843
2844         /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2845         writel(IPR_UPROCI_RESET_ALERT,
2846                ioa_cfg->regs.set_uproc_interrupt_reg32);
2847
2848         writel(IPR_UPROCI_IO_DEBUG_ALERT,
2849                ioa_cfg->regs.clr_uproc_interrupt_reg32);
2850
2851         /* Signal dump data received - Clear IO debug Ack */
2852         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2853                ioa_cfg->regs.clr_interrupt_reg);
2854
2855         /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2856         while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2857                 temp_pcii_reg =
2858                     readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2859
2860                 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2861                         return 0;
2862
2863                 udelay(10);
2864                 delay += 10;
2865         }
2866
2867         return 0;
2868 }
2869
2870 #ifdef CONFIG_SCSI_IPR_DUMP
2871 /**
2872  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2873  * @ioa_cfg:            ioa config struct
2874  * @pci_address:        adapter address
2875  * @length:                     length of data to copy
2876  *
2877  * Copy data from PCI adapter to kernel buffer.
2878  * Note: length MUST be a 4 byte multiple
2879  * Return value:
2880  *      0 on success / other on failure
2881  **/
2882 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2883                         unsigned long pci_address, u32 length)
2884 {
2885         int bytes_copied = 0;
2886         int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2887         __be32 *page;
2888         unsigned long lock_flags = 0;
2889         struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2890
2891         if (ioa_cfg->sis64)
2892                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2893         else
2894                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2895
2896         while (bytes_copied < length &&
2897                (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
2898                 if (ioa_dump->page_offset >= PAGE_SIZE ||
2899                     ioa_dump->page_offset == 0) {
2900                         page = (__be32 *)__get_free_page(GFP_ATOMIC);
2901
2902                         if (!page) {
2903                                 ipr_trace;
2904                                 return bytes_copied;
2905                         }
2906
2907                         ioa_dump->page_offset = 0;
2908                         ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2909                         ioa_dump->next_page_index++;
2910                 } else
2911                         page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2912
2913                 rem_len = length - bytes_copied;
2914                 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2915                 cur_len = min(rem_len, rem_page_len);
2916
2917                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2918                 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2919                         rc = -EIO;
2920                 } else {
2921                         rc = ipr_get_ldump_data_section(ioa_cfg,
2922                                                         pci_address + bytes_copied,
2923                                                         &page[ioa_dump->page_offset / 4],
2924                                                         (cur_len / sizeof(u32)));
2925                 }
2926                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2927
2928                 if (!rc) {
2929                         ioa_dump->page_offset += cur_len;
2930                         bytes_copied += cur_len;
2931                 } else {
2932                         ipr_trace;
2933                         break;
2934                 }
2935                 schedule();
2936         }
2937
2938         return bytes_copied;
2939 }
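
/*
 * Worked example: with 4 KB pages, a 10000 byte section is copied in chunks
 * of 4096, 4096 and 1808 bytes; host_lock is dropped and schedule() is
 * called between chunks so a large dump cannot monopolize the CPU.
 */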
2940
2941 /**
2942  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2943  * @hdr:        dump entry header struct
2944  *
2945  * Return value:
2946  *      nothing
2947  **/
2948 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2949 {
2950         hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2951         hdr->num_elems = 1;
2952         hdr->offset = sizeof(*hdr);
2953         hdr->status = IPR_DUMP_STATUS_SUCCESS;
2954 }
2955
2956 /**
2957  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2958  * @ioa_cfg:    ioa config struct
2959  * @driver_dump:        driver dump struct
2960  *
2961  * Return value:
2962  *      nothing
2963  **/
2964 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2965                                    struct ipr_driver_dump *driver_dump)
2966 {
2967         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2968
2969         ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2970         driver_dump->ioa_type_entry.hdr.len =
2971                 sizeof(struct ipr_dump_ioa_type_entry) -
2972                 sizeof(struct ipr_dump_entry_header);
2973         driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2974         driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2975         driver_dump->ioa_type_entry.type = ioa_cfg->type;
2976         driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2977                 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2978                 ucode_vpd->minor_release[1];
2979         driver_dump->hdr.num_entries++;
2980 }
2981
2982 /**
2983  * ipr_dump_version_data - Fill in the driver version in the dump.
2984  * @ioa_cfg:    ioa config struct
2985  * @driver_dump:        driver dump struct
2986  *
2987  * Return value:
2988  *      nothing
2989  **/
2990 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2991                                   struct ipr_driver_dump *driver_dump)
2992 {
2993         ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2994         driver_dump->version_entry.hdr.len =
2995                 sizeof(struct ipr_dump_version_entry) -
2996                 sizeof(struct ipr_dump_entry_header);
2997         driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2998         driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2999         strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3000         driver_dump->hdr.num_entries++;
3001 }
3002
3003 /**
3004  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3005  * @ioa_cfg:    ioa config struct
3006  * @driver_dump:        driver dump struct
3007  *
3008  * Return value:
3009  *      nothing
3010  **/
3011 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3012                                    struct ipr_driver_dump *driver_dump)
3013 {
3014         ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3015         driver_dump->trace_entry.hdr.len =
3016                 sizeof(struct ipr_dump_trace_entry) -
3017                 sizeof(struct ipr_dump_entry_header);
3018         driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3019         driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3020         memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3021         driver_dump->hdr.num_entries++;
3022 }
3023
3024 /**
3025  * ipr_dump_location_data - Fill in the IOA location in the dump.
3026  * @ioa_cfg:    ioa config struct
3027  * @driver_dump:        driver dump struct
3028  *
3029  * Return value:
3030  *      nothing
3031  **/
3032 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3033                                    struct ipr_driver_dump *driver_dump)
3034 {
3035         ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3036         driver_dump->location_entry.hdr.len =
3037                 sizeof(struct ipr_dump_location_entry) -
3038                 sizeof(struct ipr_dump_entry_header);
3039         driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3040         driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3041         strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3042         driver_dump->hdr.num_entries++;
3043 }
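
/*
 * Sketch of the assembled dump, in the order built by ipr_get_ioa_dump()
 * below:
 *
 *	struct ipr_dump_header	overall header
 *	version entry		IPR_DRIVER_VERSION (ASCII)
 *	location entry		dev_name() of the PCI device (ASCII)
 *	ioa type entry		adapter type and microcode level (binary)
 *	trace entry		IPR_TRACE_SIZE bytes of driver trace (binary)
 *	ioa dump entry		adapter memory gathered via the SDT (binary)
 */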
3044
3045 /**
3046  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3047  * @ioa_cfg:    ioa config struct
3048  * @dump:               dump struct
3049  *
3050  * Return value:
3051  *      nothing
3052  **/
3053 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3054 {
3055         unsigned long start_addr, sdt_word;
3056         unsigned long lock_flags = 0;
3057         struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3058         struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3059         u32 num_entries, max_num_entries, start_off, end_off;
3060         u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3061         struct ipr_sdt *sdt;
3062         int valid = 1;
3063         int i;
3064
3065         ENTER;
3066
3067         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3068
3069         if (ioa_cfg->sdt_state != READ_DUMP) {
3070                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3071                 return;
3072         }
3073
3074         if (ioa_cfg->sis64) {
3075                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076                 ssleep(IPR_DUMP_DELAY_SECONDS);
3077                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3078         }
3079
3080         start_addr = readl(ioa_cfg->ioa_mailbox);
3081
3082         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3083                 dev_err(&ioa_cfg->pdev->dev,
3084                         "Invalid dump table format: %lx\n", start_addr);
3085                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3086                 return;
3087         }
3088
3089         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3090
3091         driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3092
3093         /* Initialize the overall dump header */
3094         driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3095         driver_dump->hdr.num_entries = 1;
3096         driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3097         driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3098         driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3099         driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3100
3101         ipr_dump_version_data(ioa_cfg, driver_dump);
3102         ipr_dump_location_data(ioa_cfg, driver_dump);
3103         ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3104         ipr_dump_trace_data(ioa_cfg, driver_dump);
3105
3106         /* Update dump_header */
3107         driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3108
3109         /* IOA Dump entry */
3110         ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3111         ioa_dump->hdr.len = 0;
3112         ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3113         ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3114
3115         /* First entries in the sdt are actually a list of dump addresses
3116          * and lengths to gather the real dump data.  sdt points to the IOA
3117          * generated dump table.  Dump data will be extracted based on
3118          * entries in this table */
3119         sdt = &ioa_dump->sdt;
3120
3121         if (ioa_cfg->sis64) {
3122                 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3123                 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3124         } else {
3125                 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3126                 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3127         }
3128
3129         bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3130                         (max_num_entries * sizeof(struct ipr_sdt_entry));
3131         rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3132                                         bytes_to_copy / sizeof(__be32));
3133
3134         /* Verify the Smart Dump Table was fetched and is ready to use */
3135         if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3136             (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3137                 dev_err(&ioa_cfg->pdev->dev,
3138                         "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3139                         rc, be32_to_cpu(sdt->hdr.state));
3140                 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3141                 ioa_cfg->sdt_state = DUMP_OBTAINED;
3142                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143                 return;
3144         }
3145
3146         num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3147
3148         if (num_entries > max_num_entries)
3149                 num_entries = max_num_entries;
3150
3151         /* Update dump length to the actual data to be copied */
3152         dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3153         if (ioa_cfg->sis64)
3154                 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3155         else
3156                 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3157
3158         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3159
3160         for (i = 0; i < num_entries; i++) {
3161                 if (ioa_dump->hdr.len > max_dump_size) {
3162                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3163                         break;
3164                 }
3165
3166                 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3167                         sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3168                         if (ioa_cfg->sis64)
3169                                 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3170                         else {
3171                                 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3172                                 end_off = be32_to_cpu(sdt->entry[i].end_token);
3173
3174                                 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3175                                         bytes_to_copy = end_off - start_off;
3176                                 else
3177                                         valid = 0;
3178                         }
3179                         if (valid) {
3180                                 if (bytes_to_copy > max_dump_size) {
3181                                         sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3182                                         continue;
3183                                 }
3184
3185                                 /* Copy data from adapter to driver buffers */
3186                                 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3187                                                             bytes_to_copy);
3188
3189                                 ioa_dump->hdr.len += bytes_copied;
3190
3191                                 if (bytes_copied != bytes_to_copy) {
3192                                         driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3193                                         break;
3194                                 }
3195                         }
3196                 }
3197         }
3198
3199         dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3200
3201         /* Update dump_header */
3202         driver_dump->hdr.len += ioa_dump->hdr.len;
3203         wmb();
3204         ioa_cfg->sdt_state = DUMP_OBTAINED;
3205         LEAVE;
3206 }
3207
3208 #else
3209 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3210 #endif
3211
3212 /**
3213  * ipr_release_dump - Free adapter dump memory
3214  * @kref:       kref struct
3215  *
3216  * Return value:
3217  *      nothing
3218  **/
3219 static void ipr_release_dump(struct kref *kref)
3220 {
3221         struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3222         struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3223         unsigned long lock_flags = 0;
3224         int i;
3225
3226         ENTER;
3227         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3228         ioa_cfg->dump = NULL;
3229         ioa_cfg->sdt_state = INACTIVE;
3230         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232         for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3233                 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3234
3235         vfree(dump->ioa_dump.ioa_data);
3236         kfree(dump);
3237         LEAVE;
3238 }
3239
3240 /**
3241  * ipr_worker_thread - Worker thread
3242  * @work:               work struct
3243  *
3244  * Called at task level from a work thread. This function takes care
3245  * of adding and removing devices from the mid-layer as configuration
3246  * changes are detected by the adapter.
3247  *
3248  * Return value:
3249  *      nothing
3250  **/
3251 static void ipr_worker_thread(struct work_struct *work)
3252 {
3253         unsigned long lock_flags;
3254         struct ipr_resource_entry *res;
3255         struct scsi_device *sdev;
3256         struct ipr_dump *dump;
3257         struct ipr_ioa_cfg *ioa_cfg =
3258                 container_of(work, struct ipr_ioa_cfg, work_q);
3259         u8 bus, target, lun;
3260         int did_work;
3261
3262         ENTER;
3263         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3264
3265         if (ioa_cfg->sdt_state == READ_DUMP) {
3266                 dump = ioa_cfg->dump;
3267                 if (!dump) {
3268                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3269                         return;
3270                 }
3271                 kref_get(&dump->kref);
3272                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3273                 ipr_get_ioa_dump(ioa_cfg, dump);
3274                 kref_put(&dump->kref, ipr_release_dump);
3275
3276                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3277                 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3278                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3279                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3280                 return;
3281         }
3282
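        /*
         * Note: scsi_remove_device()/scsi_add_device() below must be called
         * without host_lock held, and used_res_q can change while the lock
         * is dropped, so both loops restart from the head of the list after
         * every device operation.
         */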
3283 restart:
3284         do {
3285                 did_work = 0;
3286                 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3287                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3288                         return;
3289                 }
3290
3291                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3292                         if (res->del_from_ml && res->sdev) {
3293                                 did_work = 1;
3294                                 sdev = res->sdev;
3295                                 if (!scsi_device_get(sdev)) {
3296                                         if (!res->add_to_ml)
3297                                                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3298                                         else
3299                                                 res->del_from_ml = 0;
3300                                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3301                                         scsi_remove_device(sdev);
3302                                         scsi_device_put(sdev);
3303                                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3304                                 }
3305                                 break;
3306                         }
3307                 }
3308         } while (did_work);
3309
3310         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3311                 if (res->add_to_ml) {
3312                         bus = res->bus;
3313                         target = res->target;
3314                         lun = res->lun;
3315                         res->add_to_ml = 0;
3316                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3317                         scsi_add_device(ioa_cfg->host, bus, target, lun);
3318                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3319                         goto restart;
3320                 }
3321         }
3322
3323         ioa_cfg->scan_done = 1;
3324         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3325         kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3326         LEAVE;
3327 }
3328
3329 #ifdef CONFIG_SCSI_IPR_TRACE
3330 /**
3331  * ipr_read_trace - Dump the adapter trace
3332  * @filp:               open sysfs file
3333  * @kobj:               kobject struct
3334  * @bin_attr:           bin_attribute struct
3335  * @buf:                buffer
3336  * @off:                offset
3337  * @count:              buffer size
3338  *
3339  * Return value:
3340  *      number of bytes printed to buffer
3341  **/
3342 static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3343                               struct bin_attribute *bin_attr,
3344                               char *buf, loff_t off, size_t count)
3345 {
3346         struct device *dev = container_of(kobj, struct device, kobj);
3347         struct Scsi_Host *shost = class_to_shost(dev);
3348         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349         unsigned long lock_flags = 0;
3350         ssize_t ret;
3351
3352         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3353         ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3354                                 IPR_TRACE_SIZE);
3355         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356
3357         return ret;
3358 }
3359
3360 static struct bin_attribute ipr_trace_attr = {
3361         .attr = {
3362                 .name = "trace",
3363                 .mode = S_IRUGO,
3364         },
3365         .size = 0,
3366         .read = ipr_read_trace,
3367 };
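
/*
 * Note: the attribute above exposes the trace buffer through sysfs under the
 * Scsi_Host class device (e.g. /sys/class/scsi_host/host<N>/trace; path
 * inferred, not taken from this file), so it can be captured with a plain
 * read for offline analysis.
 */
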
3368 #endif
3369
3370 /**
3371  * ipr_show_fw_version - Show the firmware version
3372  * @dev:        class device struct
3373  * @buf:        buffer
3374  *
3375  * Return value:
3376  *      number of bytes printed to buffer
3377  **/
3378 static ssize_t ipr_show_fw_version(struct device *dev,
3379                                    struct device_attribute *attr, char *buf)
3380 {
3381         struct Scsi_Host *shost = class_to_shost(dev);
3382         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3383         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3384         unsigned long lock_flags = 0;
3385         int len;
3386
3387         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3388         len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3389                        ucode_vpd->major_release, ucode_vpd->card_type,
3390                        ucode_vpd->minor_release[0],
3391                        ucode_vpd->minor_release[1]);
3392         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393         return len;
3394 }
3395
3396 static struct device_attribute ipr_fw_version_attr = {
3397         .attr = {
3398                 .name =         "fw_version",
3399                 .mode =         S_IRUGO,
3400         },
3401         .show = ipr_show_fw_version,
3402 };
3403
3404 /**
3405  * ipr_show_log_level - Show the adapter's error logging level
3406  * @dev:        class device struct
3407  * @buf:        buffer
3408  *
3409  * Return value:
3410  *      number of bytes printed to buffer
3411  **/
3412 static ssize_t ipr_show_log_level(struct device *dev,
3413                                    struct device_attribute *attr, char *buf)
3414 {
3415         struct Scsi_Host *shost = class_to_shost(dev);
3416         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3417         unsigned long lock_flags = 0;
3418         int len;
3419
3420         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3421         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3422         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3423         return len;
3424 }
3425
3426 /**
3427  * ipr_store_log_level - Change the adapter's error logging level
3428  * @dev:        class device struct
3429  * @buf:        buffer
3430  *
3431  * Return value:
3432  *      count on success / other on failure
3433  **/
3434 static ssize_t ipr_store_log_level(struct device *dev,
3435                                    struct device_attribute *attr,
3436                                    const char *buf, size_t count)
3437 {
3438         struct Scsi_Host *shost = class_to_shost(dev);
3439         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3440         unsigned long lock_flags = 0;
3441
3442         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3443         ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3444         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3445         return strlen(buf);
3446 }
3447
3448 static struct device_attribute ipr_log_level_attr = {
3449         .attr = {
3450                 .name =         "log_level",
3451                 .mode =         S_IRUGO | S_IWUSR,
3452         },
3453         .show = ipr_show_log_level,
3454         .store = ipr_store_log_level
3455 };
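
/*
 * Usage example (sysfs path inferred from the Scsi_Host class device):
 *
 *	echo 2 > /sys/class/scsi_host/host<N>/log_level
 *
 * raises the logging verbosity; reading the file returns the current level.
 */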
3456
3457 /**
3458  * ipr_store_diagnostics - IOA Diagnostics interface
3459  * @dev:        device struct
3460  * @buf:        buffer
3461  * @count:      buffer size
3462  *
3463  * This function will reset the adapter and wait a reasonable
3464  * amount of time for any errors that the adapter might log.
3465  *
3466  * Return value:
3467  *      count on success / other on failure
3468  **/
3469 static ssize_t ipr_store_diagnostics(struct device *dev,
3470                                      struct device_attribute *attr,
3471                                      const char *buf, size_t count)
3472 {
3473         struct Scsi_Host *shost = class_to_shost(dev);
3474         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3475         unsigned long lock_flags = 0;
3476         int rc = count;
3477
3478         if (!capable(CAP_SYS_ADMIN))
3479                 return -EACCES;
3480
3481         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3482         while (ioa_cfg->in_reset_reload) {
3483                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3484                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3485                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3486         }
3487
3488         ioa_cfg->errors_logged = 0;
3489         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3490
3491         if (ioa_cfg->in_reset_reload) {
3492                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3493                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3494
3495                 /* Wait for a second for any errors to be logged */
3496                 msleep(1000);
3497         } else {
3498                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3499                 return -EIO;
3500         }
3501
3502         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3503         if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3504                 rc = -EIO;
3505         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3506
3507         return rc;
3508 }
3509
3510 static struct device_attribute ipr_diagnostics_attr = {
3511         .attr = {
3512                 .name =         "run_diagnostics",
3513                 .mode =         S_IWUSR,
3514         },
3515         .store = ipr_store_diagnostics
3516 };
3517
3518 /**
3519  * ipr_show_adapter_state - Show the adapter's state
3520  * @dev:        device struct
3521  * @buf:        buffer
3522  *
3523  * Return value:
3524  *      number of bytes printed to buffer
3525  **/
3526 static ssize_t ipr_show_adapter_state(struct device *dev,
3527                                       struct device_attribute *attr, char *buf)
3528 {
3529         struct Scsi_Host *shost = class_to_shost(dev);
3530         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3531         unsigned long lock_flags = 0;
3532         int len;
3533
3534         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3535         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3536                 len = snprintf(buf, PAGE_SIZE, "offline\n");
3537         else
3538                 len = snprintf(buf, PAGE_SIZE, "online\n");
3539         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3540         return len;
3541 }
3542
3543 /**
3544  * ipr_store_adapter_state - Change adapter state
3545  * @dev:        device struct
3546  * @buf:        buffer
3547  * @count:      buffer size
3548  *
3549  * This function will change the adapter's state.
3550  *
3551  * Return value:
3552  *      count on success / other on failure
3553  **/
3554 static ssize_t ipr_store_adapter_state(struct device *dev,
3555                                        struct device_attribute *attr,
3556                                        const char *buf, size_t count)
3557 {
3558         struct Scsi_Host *shost = class_to_shost(dev);
3559         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3560         unsigned long lock_flags;
3561         int result = count, i;
3562
3563         if (!capable(CAP_SYS_ADMIN))
3564                 return -EACCES;
3565
3566         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3568             !strncmp(buf, "online", 6)) {
3569                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3570                         spin_lock(&ioa_cfg->hrrq[i]._lock);
3571                         ioa_cfg->hrrq[i].ioa_is_dead = 0;
3572                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
3573                 }
3574                 wmb();
3575                 ioa_cfg->reset_retries = 0;
3576                 ioa_cfg->in_ioa_bringdown = 0;
3577                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3578         }
3579         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3581
3582         return result;
3583 }
3584
3585 static struct device_attribute ipr_ioa_state_attr = {
3586         .attr = {
3587                 .name =         "online_state",
3588                 .mode =         S_IRUGO | S_IWUSR,
3589         },
3590         .show = ipr_show_adapter_state,
3591         .store = ipr_store_adapter_state
3592 };
3593
3594 /**
3595  * ipr_store_reset_adapter - Reset the adapter
3596  * @dev:        device struct
3597  * @buf:        buffer
3598  * @count:      buffer size
3599  *
3600  * This function will reset the adapter.
3601  *
3602  * Return value:
3603  *      count on success / other on failure
3604  **/
3605 static ssize_t ipr_store_reset_adapter(struct device *dev,
3606                                        struct device_attribute *attr,
3607                                        const char *buf, size_t count)
3608 {
3609         struct Scsi_Host *shost = class_to_shost(dev);
3610         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3611         unsigned long lock_flags;
3612         int result = count;
3613
3614         if (!capable(CAP_SYS_ADMIN))
3615                 return -EACCES;
3616
3617         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3618         if (!ioa_cfg->in_reset_reload)
3619                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3620         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3622
3623         return result;
3624 }
3625
3626 static struct device_attribute ipr_ioa_reset_attr = {
3627         .attr = {
3628                 .name =         "reset_host",
3629                 .mode =         S_IWUSR,
3630         },
3631         .store = ipr_store_reset_adapter
3632 };
3633
3634 static int ipr_iopoll(struct blk_iopoll *iop, int budget);
3635 /**
3636  * ipr_show_iopoll_weight - Show ipr polling mode
3637  * @dev:        class device struct
3638  * @buf:        buffer
3639  *
3640  * Return value:
3641  *      number of bytes printed to buffer
3642  **/
3643 static ssize_t ipr_show_iopoll_weight(struct device *dev,
3644                                    struct device_attribute *attr, char *buf)
3645 {
3646         struct Scsi_Host *shost = class_to_shost(dev);
3647         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3648         unsigned long lock_flags = 0;
3649         int len;
3650
3651         spin_lock_irqsave(shost->host_lock, lock_flags);
3652         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3653         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3654
3655         return len;
3656 }
3657
3658 /**
3659  * ipr_store_iopoll_weight - Change the adapter's polling mode
3660  * @dev:        class device struct
3661  * @buf:        buffer
3662  *
3663  * Return value:
3664  *      count on success / other on failure
3665  **/
3666 static ssize_t ipr_store_iopoll_weight(struct device *dev,
3667                                         struct device_attribute *attr,
3668                                         const char *buf, size_t count)
3669 {
3670         struct Scsi_Host *shost = class_to_shost(dev);
3671         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3672         unsigned long user_iopoll_weight;
3673         unsigned long lock_flags = 0;
3674         int i;
3675
3676         if (!ioa_cfg->sis64) {
3677                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
3678                 return -EINVAL;
3679         }
3680         if (kstrtoul(buf, 10, &user_iopoll_weight))
3681                 return -EINVAL;
3682
3683         if (user_iopoll_weight > 256) {
3684                 dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
3685                 return -EINVAL;
3686         }
3687
3688         if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3689                 dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight is already set to this value\n");
3690                 return strlen(buf);
3691         }
3692
3693         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3694                 for (i = 1; i < ioa_cfg->hrrq_num; i++)
3695                         blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
3696         }
3697
3698         spin_lock_irqsave(shost->host_lock, lock_flags);
3699         ioa_cfg->iopoll_weight = user_iopoll_weight;
3700         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3701                 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3702                         blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
3703                                         ioa_cfg->iopoll_weight, ipr_iopoll);
3704                         blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
3705                 }
3706         }
3707         spin_unlock_irqrestore(shost->host_lock, lock_flags);
3708
3709         return strlen(buf);
3710 }
3711
3712 static struct device_attribute ipr_iopoll_weight_attr = {
3713         .attr = {
3714                 .name =         "iopoll_weight",
3715                 .mode =         S_IRUGO | S_IWUSR,
3716         },
3717         .show = ipr_show_iopoll_weight,
3718         .store = ipr_store_iopoll_weight
3719 };
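
/*
 * Note on the store logic above: a weight of 0 disables polled completion
 * handling, while a non-zero weight (at most 256) re-initializes blk_iopoll
 * on every HRRQ except hrrq[0], which stays interrupt driven; polling is
 * only honoured on sis64 adapters with more than one vector.
 */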
3720
3721 /**
3722  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3723  * @buf_len:            buffer length
3724  *
3725  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3726  * list to use for microcode download
3727  *
3728  * Return value:
3729  *      pointer to sglist / NULL on failure
3730  **/
3731 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3732 {
3733         int sg_size, order, bsize_elem, num_elem, i, j;
3734         struct ipr_sglist *sglist;
3735         struct scatterlist *scatterlist;
3736         struct page *page;
3737
3738         /* Get the minimum size per scatter/gather element */
3739         sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3740
3741         /* Get the actual size per element */
3742         order = get_order(sg_size);
3743
3744         /* Determine the actual number of bytes per element */
3745         bsize_elem = PAGE_SIZE * (1 << order);
3746
3747         /* Determine the actual number of sg entries needed */
3748         if (buf_len % bsize_elem)
3749                 num_elem = (buf_len / bsize_elem) + 1;
3750         else
3751                 num_elem = buf_len / bsize_elem;
3752
3753         /* Allocate a scatter/gather list for the DMA */
3754         sglist = kzalloc(sizeof(struct ipr_sglist) +
3755                          (sizeof(struct scatterlist) * (num_elem - 1)),
3756                          GFP_KERNEL);
3757
3758         if (sglist == NULL) {
3759                 ipr_trace;
3760                 return NULL;
3761         }
3762
3763         scatterlist = sglist->scatterlist;
3764         sg_init_table(scatterlist, num_elem);
3765
3766         sglist->order = order;
3767         sglist->num_sg = num_elem;
3768
3769         /* Allocate a bunch of sg elements */
3770         for (i = 0; i < num_elem; i++) {
3771                 page = alloc_pages(GFP_KERNEL, order);
3772                 if (!page) {
3773                         ipr_trace;
3774
3775                         /* Free up what we already allocated */
3776                         for (j = i - 1; j >= 0; j--)
3777                                 __free_pages(sg_page(&scatterlist[j]), order);
3778                         kfree(sglist);
3779                         return NULL;
3780                 }
3781
3782                 sg_set_page(&scatterlist[i], page, 0, 0);
3783         }
3784
3785         return sglist;
3786 }
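
/*
 * Worked example (assuming IPR_MAX_SGLIST == 64 and 4 KB pages): for a 1 MB
 * image, sg_size = 1048576 / 63 = 16644 bytes, which get_order() rounds up
 * to order 3 (32 KB), so bsize_elem = 32768 and num_elem = 32.  The
 * kzalloc() above allocates num_elem - 1 extra scatterlist entries because
 * struct ipr_sglist presumably embeds the first element.
 */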
3787
3788 /**
3789  * ipr_free_ucode_buffer - Frees a microcode download buffer
3790  * @sglist:             scatter/gather list pointer
3791  *
3792  * Free a DMA'able ucode download buffer previously allocated with
3793  * ipr_alloc_ucode_buffer
3794  *
3795  * Return value:
3796  *      nothing
3797  **/
3798 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3799 {
3800         int i;
3801
3802         for (i = 0; i < sglist->num_sg; i++)
3803                 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3804
3805         kfree(sglist);
3806 }
3807
3808 /**
3809  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3810  * @sglist:             scatter/gather list pointer
3811  * @buffer:             buffer pointer
3812  * @len:                buffer length
3813  *
3814  * Copy a microcode image from a user buffer into a buffer allocated by
3815  * ipr_alloc_ucode_buffer
3816  *
3817  * Return value:
3818  *      0 on success / other on failure
3819  **/
3820 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3821                                  u8 *buffer, u32 len)
3822 {
3823         int bsize_elem, i, result = 0;
3824         struct scatterlist *scatterlist;
3825         void *kaddr;
3826
3827         /* Determine the actual number of bytes per element */
3828         bsize_elem = PAGE_SIZE * (1 << sglist->order);
3829
3830         scatterlist = sglist->scatterlist;
3831
3832         for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3833                 struct page *page = sg_page(&scatterlist[i]);
3834
3835                 kaddr = kmap(page);
3836                 memcpy(kaddr, buffer, bsize_elem);
3837                 kunmap(page);
3838
3839                 scatterlist[i].length = bsize_elem;
3840
3841                 if (result != 0) {
3842                         ipr_trace;
3843                         return result;
3844                 }
3845         }
3846
3847         if (len % bsize_elem) {
3848                 struct page *page = sg_page(&scatterlist[i]);
3849
3850                 kaddr = kmap(page);
3851                 memcpy(kaddr, buffer, len % bsize_elem);
3852                 kunmap(page);
3853
3854                 scatterlist[i].length = len % bsize_elem;
3855         }
3856
3857         sglist->buffer_len = len;
3858         return result;
3859 }
3860
3861 /**
3862  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3863  * @ipr_cmd:            ipr command struct
3864  * @sglist:             scatter/gather list
3865  *
3866  * Builds a microcode download IOA data list (IOADL).
3867  *
3868  **/
3869 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3870                                     struct ipr_sglist *sglist)
3871 {
3872         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3873         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3874         struct scatterlist *scatterlist = sglist->scatterlist;
3875         int i;
3876
3877         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3878         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3879         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3880
3881         ioarcb->ioadl_len =
3882                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3883         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3884                 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3885                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3886                 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3887         }
3888
3889         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3890 }
3891
3892 /**
3893  * ipr_build_ucode_ioadl - Build a microcode download IOADL
3894  * @ipr_cmd:    ipr command struct
3895  * @sglist:             scatter/gather list
3896  *
3897  * Builds a microcode download IOA data list (IOADL).
3898  *
3899  **/
3900 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3901                                   struct ipr_sglist *sglist)
3902 {
3903         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3904         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3905         struct scatterlist *scatterlist = sglist->scatterlist;
3906         int i;
3907
3908         ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3909         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3910         ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3911
3912         ioarcb->ioadl_len =
3913                 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3914
3915         for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3916                 ioadl[i].flags_and_data_len =
3917                         cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3918                 ioadl[i].address =
3919                         cpu_to_be32(sg_dma_address(&scatterlist[i]));
3920         }
3921
3922         ioadl[i-1].flags_and_data_len |=
3923                 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3924 }
3925
3926 /**
3927  * ipr_update_ioa_ucode - Update IOA's microcode
3928  * @ioa_cfg:    ioa config struct
3929  * @sglist:             scatter/gather list
3930  *
3931  * Initiate an adapter reset to update the IOA's microcode
3932  *
3933  * Return value:
3934  *      0 on success / -EIO on failure
3935  **/
3936 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3937                                 struct ipr_sglist *sglist)
3938 {
3939         unsigned long lock_flags;
3940
3941         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3942         while (ioa_cfg->in_reset_reload) {
3943                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3944                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3945                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3946         }
3947
3948         if (ioa_cfg->ucode_sglist) {
3949                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3950                 dev_err(&ioa_cfg->pdev->dev,
3951                         "Microcode download already in progress\n");
3952                 return -EIO;
3953         }
3954
3955         sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3956                                         sglist->scatterlist, sglist->num_sg,
3957                                         DMA_TO_DEVICE);
3958
3959         if (!sglist->num_dma_sg) {
3960                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3961                 dev_err(&ioa_cfg->pdev->dev,
3962                         "Failed to map microcode download buffer!\n");
3963                 return -EIO;
3964         }
3965
3966         ioa_cfg->ucode_sglist = sglist;
3967         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3968         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3969         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3970
3971         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3972         ioa_cfg->ucode_sglist = NULL;
3973         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3974         return 0;
3975 }
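
/*
 * Note: the image is not written here; the mapped scatter/gather list is
 * parked in ioa_cfg->ucode_sglist and is presumably consumed by the adapter
 * reset job started above, after which the pointer is cleared.
 */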
3976
3977 /**
3978  * ipr_store_update_fw - Update the firmware on the adapter
3979  * @dev:        device struct
3980  * @buf:        buffer
3981  * @count:      buffer size
3982  *
3983  * This function will update the firmware on the adapter.
3984  *
3985  * Return value:
3986  *      count on success / other on failure
3987  **/
3988 static ssize_t ipr_store_update_fw(struct device *dev,
3989                                    struct device_attribute *attr,
3990                                    const char *buf, size_t count)
3991 {
3992         struct Scsi_Host *shost = class_to_shost(dev);
3993         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3994         struct ipr_ucode_image_header *image_hdr;
3995         const struct firmware *fw_entry;
3996         struct ipr_sglist *sglist;
3997         char fname[100];
3998         char *src;
3999         int result, dnld_size;
4000
4001         if (!capable(CAP_SYS_ADMIN))
4002                 return -EACCES;
4003
4004         snprintf(fname, sizeof(fname), "%s", buf);
4005         fname[strcspn(fname, "\n")] = '\0';
4006
4007         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4008                 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4009                 return -EIO;
4010         }
4011
4012         image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4013
4014         src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4015         dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4016         sglist = ipr_alloc_ucode_buffer(dnld_size);
4017
4018         if (!sglist) {
4019                 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4020                 release_firmware(fw_entry);
4021                 return -ENOMEM;
4022         }
4023
4024         result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4025
4026         if (result) {
4027                 dev_err(&ioa_cfg->pdev->dev,
4028                         "Microcode buffer copy to DMA buffer failed\n");
4029                 goto out;
4030         }
4031
4032         ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4033
4034         result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4035
4036         if (!result)
4037                 result = count;
4038 out:
4039         ipr_free_ucode_buffer(sglist);
4040         release_firmware(fw_entry);
4041         return result;
4042 }
4043
4044 static struct device_attribute ipr_update_fw_attr = {
4045         .attr = {
4046                 .name =         "update_fw",
4047                 .mode =         S_IWUSR,
4048         },
4049         .store = ipr_store_update_fw
4050 };
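
/*
 * Usage example (paths inferred): with the microcode image placed where
 * request_firmware() can find it (typically /lib/firmware),
 *
 *	echo <image-file-name> > /sys/class/scsi_host/host<N>/update_fw
 *
 * starts the download and the adapter reset that activates the new image.
 */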
4051
4052 /**
4053  * ipr_show_fw_type - Show the adapter's firmware type.
4054  * @dev:        class device struct
4055  * @buf:        buffer
4056  *
4057  * Return value:
4058  *      number of bytes printed to buffer
4059  **/
4060 static ssize_t ipr_show_fw_type(struct device *dev,
4061                                 struct device_attribute *attr, char *buf)
4062 {
4063         struct Scsi_Host *shost = class_to_shost(dev);
4064         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4065         unsigned long lock_flags = 0;
4066         int len;
4067
4068         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4069         len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4070         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4071         return len;
4072 }
4073
4074 static struct device_attribute ipr_ioa_fw_type_attr = {
4075         .attr = {
4076                 .name =         "fw_type",
4077                 .mode =         S_IRUGO,
4078         },
4079         .show = ipr_show_fw_type
4080 };
4081
4082 static struct device_attribute *ipr_ioa_attrs[] = {
4083         &ipr_fw_version_attr,
4084         &ipr_log_level_attr,
4085         &ipr_diagnostics_attr,
4086         &ipr_ioa_state_attr,
4087         &ipr_ioa_reset_attr,
4088         &ipr_update_fw_attr,
4089         &ipr_ioa_fw_type_attr,
4090         &ipr_iopoll_weight_attr,
4091         NULL,
4092 };
4093
4094 #ifdef CONFIG_SCSI_IPR_DUMP
4095 /**
4096  * ipr_read_dump - Dump the adapter
4097  * @filp:               open sysfs file
4098  * @kobj:               kobject struct
4099  * @bin_attr:           bin_attribute struct
4100  * @buf:                buffer
4101  * @off:                offset
4102  * @count:              buffer size
4103  *
4104  * Return value:
 *      number of bytes read
4106  **/
4107 static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4108                              struct bin_attribute *bin_attr,
4109                              char *buf, loff_t off, size_t count)
4110 {
4111         struct device *cdev = container_of(kobj, struct device, kobj);
4112         struct Scsi_Host *shost = class_to_shost(cdev);
4113         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4114         struct ipr_dump *dump;
4115         unsigned long lock_flags = 0;
4116         char *src;
4117         int len, sdt_end;
4118         size_t rc = count;
4119
4120         if (!capable(CAP_SYS_ADMIN))
4121                 return -EACCES;
4122
4123         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4124         dump = ioa_cfg->dump;
4125
4126         if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4127                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128                 return 0;
4129         }
4130         kref_get(&dump->kref);
4131         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4132
4133         if (off > dump->driver_dump.hdr.len) {
4134                 kref_put(&dump->kref, ipr_release_dump);
4135                 return 0;
4136         }
4137
4138         if (off + count > dump->driver_dump.hdr.len) {
4139                 count = dump->driver_dump.hdr.len - off;
4140                 rc = count;
4141         }
4142
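        /*
         * The dump image is presented to userspace as three concatenated
         * regions: the driver dump header, the SDT (self-describing table),
         * and the IOA dump pages. Each stage below copies its slice and
         * adjusts off/count so the next region sees a rebased offset.
         */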
4143         if (count && off < sizeof(dump->driver_dump)) {
4144                 if (off + count > sizeof(dump->driver_dump))
4145                         len = sizeof(dump->driver_dump) - off;
4146                 else
4147                         len = count;
4148                 src = (u8 *)&dump->driver_dump + off;
4149                 memcpy(buf, src, len);
4150                 buf += len;
4151                 off += len;
4152                 count -= len;
4153         }
4154
4155         off -= sizeof(dump->driver_dump);
4156
4157         if (ioa_cfg->sis64)
4158                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4159                           (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4160                            sizeof(struct ipr_sdt_entry));
4161         else
4162                 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4163                           (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4164
4165         if (count && off < sdt_end) {
4166                 if (off + count > sdt_end)
4167                         len = sdt_end - off;
4168                 else
4169                         len = count;
4170                 src = (u8 *)&dump->ioa_dump + off;
4171                 memcpy(buf, src, len);
4172                 buf += len;
4173                 off += len;
4174                 count -= len;
4175         }
4176
4177         off -= sdt_end;
4178
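        /*
         * The remaining dump data lives in the discontiguous pages pointed
         * to by ioa_data[]; copy it out one page (or partial page) at a time.
         */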
4179         while (count) {
4180                 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4181                         len = PAGE_ALIGN(off) - off;
4182                 else
4183                         len = count;
4184                 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4185                 src += off & ~PAGE_MASK;
4186                 memcpy(buf, src, len);
4187                 buf += len;
4188                 off += len;
4189                 count -= len;
4190         }
4191
4192         kref_put(&dump->kref, ipr_release_dump);
4193         return rc;
4194 }
4195
4196 /**
4197  * ipr_alloc_dump - Prepare for adapter dump
4198  * @ioa_cfg:    ioa config struct
4199  *
4200  * Return value:
4201  *      0 on success / other on failure
4202  **/
4203 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4204 {
4205         struct ipr_dump *dump;
4206         __be32 **ioa_data;
4207         unsigned long lock_flags = 0;
4208
4209         dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4210
4211         if (!dump) {
4212                 ipr_err("Dump memory allocation failed\n");
4213                 return -ENOMEM;
4214         }
4215
4216         if (ioa_cfg->sis64)
4217                 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4218         else
4219                 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4220
4221         if (!ioa_data) {
4222                 ipr_err("Dump memory allocation failed\n");
4223                 kfree(dump);
4224                 return -ENOMEM;
4225         }
4226
4227         dump->ioa_dump.ioa_data = ioa_data;
4228
4229         kref_init(&dump->kref);
4230         dump->ioa_cfg = ioa_cfg;
4231
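        /*
         * The dump is reference counted: ipr_read_dump() takes a reference
         * under the host lock before copying, so dropping the config's
         * reference in ipr_free_dump() cannot free it out from under a
         * concurrent sysfs read.
         */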
4232         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4233
4234         if (INACTIVE != ioa_cfg->sdt_state) {
4235                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4236                 vfree(dump->ioa_dump.ioa_data);
4237                 kfree(dump);
4238                 return 0;
4239         }
4240
4241         ioa_cfg->dump = dump;
4242         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4243         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4244                 ioa_cfg->dump_taken = 1;
4245                 schedule_work(&ioa_cfg->work_q);
4246         }
4247         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4248
4249         return 0;
4250 }
4251
4252 /**
4253  * ipr_free_dump - Free adapter dump memory
4254  * @ioa_cfg:    ioa config struct
4255  *
4256  * Return value:
4257  *      0 on success / other on failure
4258  **/
4259 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4260 {
4261         struct ipr_dump *dump;
4262         unsigned long lock_flags = 0;
4263
4264         ENTER;
4265
4266         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4267         dump = ioa_cfg->dump;
4268         if (!dump) {
4269                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4270                 return 0;
4271         }
4272
4273         ioa_cfg->dump = NULL;
4274         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4275
4276         kref_put(&dump->kref, ipr_release_dump);
4277
4278         LEAVE;
4279         return 0;
4280 }
4281
4282 /**
 * ipr_write_dump - Set up the dump state of the adapter
4284  * @filp:               open sysfs file
4285  * @kobj:               kobject struct
4286  * @bin_attr:           bin_attribute struct
4287  * @buf:                buffer
4288  * @off:                offset
4289  * @count:              buffer size
4290  *
4291  * Return value:
 *      count on success / other on failure
4293  **/
4294 static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4295                               struct bin_attribute *bin_attr,
4296                               char *buf, loff_t off, size_t count)
4297 {
4298         struct device *cdev = container_of(kobj, struct device, kobj);
4299         struct Scsi_Host *shost = class_to_shost(cdev);
4300         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4301         int rc;
4302
4303         if (!capable(CAP_SYS_ADMIN))
4304                 return -EACCES;
4305
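        /* Writing '1' arms dump collection; writing '0' releases the dump. */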
4306         if (buf[0] == '1')
4307                 rc = ipr_alloc_dump(ioa_cfg);
4308         else if (buf[0] == '0')
4309                 rc = ipr_free_dump(ioa_cfg);
4310         else
4311                 return -EINVAL;
4312
4313         if (rc)
4314                 return rc;
4315         else
4316                 return count;
4317 }
4318
4319 static struct bin_attribute ipr_dump_attr = {
4320         .attr = {
4321                 .name = "dump",
4322                 .mode = S_IRUSR | S_IWUSR,
4323         },
4324         .size = 0,
4325         .read = ipr_read_dump,
4326         .write = ipr_write_dump
4327 };
4328 #else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4330 #endif
4331
4332 /**
4333  * ipr_change_queue_depth - Change the device's queue depth
4334  * @sdev:       scsi device struct
 * @qdepth:     depth to set
4337  *
4338  * Return value:
4339  *      actual depth set
4340  **/
4341 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4342 {
4343         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4344         struct ipr_resource_entry *res;
4345         unsigned long lock_flags = 0;
4346
4347         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4348         res = (struct ipr_resource_entry *)sdev->hostdata;
4349
4350         if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4351                 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4352         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4353
4354         scsi_change_queue_depth(sdev, qdepth);
4355         return sdev->queue_depth;
4356 }
4357
4358 /**
4359  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4360  * @dev:        device struct
4361  * @attr:       device attribute structure
4362  * @buf:        buffer
4363  *
4364  * Return value:
4365  *      number of bytes printed to buffer
4366  **/
4367 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4368 {
4369         struct scsi_device *sdev = to_scsi_device(dev);
4370         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4371         struct ipr_resource_entry *res;
4372         unsigned long lock_flags = 0;
4373         ssize_t len = -ENXIO;
4374
4375         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4376         res = (struct ipr_resource_entry *)sdev->hostdata;
4377         if (res)
4378                 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4379         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4380         return len;
4381 }
4382
4383 static struct device_attribute ipr_adapter_handle_attr = {
4384         .attr = {
4385                 .name =         "adapter_handle",
4386                 .mode =         S_IRUSR,
4387         },
4388         .show = ipr_show_adapter_handle
4389 };
4390
4391 /**
4392  * ipr_show_resource_path - Show the resource path or the resource address for
4393  *                          this device.
4394  * @dev:        device struct
4395  * @attr:       device attribute structure
4396  * @buf:        buffer
4397  *
4398  * Return value:
4399  *      number of bytes printed to buffer
4400  **/
4401 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4402 {
4403         struct scsi_device *sdev = to_scsi_device(dev);
4404         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4405         struct ipr_resource_entry *res;
4406         unsigned long lock_flags = 0;
4407         ssize_t len = -ENXIO;
4408         char buffer[IPR_MAX_RES_PATH_LENGTH];
4409
4410         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4411         res = (struct ipr_resource_entry *)sdev->hostdata;
4412         if (res && ioa_cfg->sis64)
4413                 len = snprintf(buf, PAGE_SIZE, "%s\n",
4414                                __ipr_format_res_path(res->res_path, buffer,
4415                                                      sizeof(buffer)));
4416         else if (res)
4417                 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4418                                res->bus, res->target, res->lun);
4419
4420         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4421         return len;
4422 }
4423
4424 static struct device_attribute ipr_resource_path_attr = {
4425         .attr = {
4426                 .name =         "resource_path",
4427                 .mode =         S_IRUGO,
4428         },
4429         .show = ipr_show_resource_path
4430 };
4431
4432 /**
4433  * ipr_show_device_id - Show the device_id for this device.
4434  * @dev:        device struct
4435  * @attr:       device attribute structure
4436  * @buf:        buffer
4437  *
4438  * Return value:
4439  *      number of bytes printed to buffer
4440  **/
4441 static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4442 {
4443         struct scsi_device *sdev = to_scsi_device(dev);
4444         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4445         struct ipr_resource_entry *res;
4446         unsigned long lock_flags = 0;
4447         ssize_t len = -ENXIO;
4448
4449         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4450         res = (struct ipr_resource_entry *)sdev->hostdata;
4451         if (res && ioa_cfg->sis64)
4452                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4453         else if (res)
4454                 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4455
4456         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4457         return len;
4458 }
4459
4460 static struct device_attribute ipr_device_id_attr = {
4461         .attr = {
4462                 .name =         "device_id",
4463                 .mode =         S_IRUGO,
4464         },
4465         .show = ipr_show_device_id
4466 };
4467
4468 /**
4469  * ipr_show_resource_type - Show the resource type for this device.
4470  * @dev:        device struct
4471  * @attr:       device attribute structure
4472  * @buf:        buffer
4473  *
4474  * Return value:
4475  *      number of bytes printed to buffer
4476  **/
4477 static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4478 {
4479         struct scsi_device *sdev = to_scsi_device(dev);
4480         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4481         struct ipr_resource_entry *res;
4482         unsigned long lock_flags = 0;
4483         ssize_t len = -ENXIO;
4484
4485         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4486         res = (struct ipr_resource_entry *)sdev->hostdata;
4487
4488         if (res)
4489                 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4490
4491         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4492         return len;
4493 }
4494
4495 static struct device_attribute ipr_resource_type_attr = {
4496         .attr = {
4497                 .name =         "resource_type",
4498                 .mode =         S_IRUGO,
4499         },
4500         .show = ipr_show_resource_type
4501 };
4502
4503 /**
 * ipr_show_raw_mode - Show the device's raw mode
 * @dev:        device struct
 * @attr:       device attribute structure
 * @buf:        buffer
4507  *
4508  * Return value:
4509  *      number of bytes printed to buffer
4510  **/
4511 static ssize_t ipr_show_raw_mode(struct device *dev,
4512                                  struct device_attribute *attr, char *buf)
4513 {
4514         struct scsi_device *sdev = to_scsi_device(dev);
4515         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4516         struct ipr_resource_entry *res;
4517         unsigned long lock_flags = 0;
4518         ssize_t len;
4519
4520         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4521         res = (struct ipr_resource_entry *)sdev->hostdata;
4522         if (res)
4523                 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4524         else
4525                 len = -ENXIO;
4526         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4527         return len;
4528 }
4529
4530 /**
 * ipr_store_raw_mode - Change the device's raw mode
 * @dev:        device struct
 * @attr:       device attribute structure
 * @buf:        buffer
 * @count:      buffer size
 *
 * Return value:
 *      count on success / other on failure
4537  **/
4538 static ssize_t ipr_store_raw_mode(struct device *dev,
4539                                   struct device_attribute *attr,
4540                                   const char *buf, size_t count)
4541 {
4542         struct scsi_device *sdev = to_scsi_device(dev);
4543         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4544         struct ipr_resource_entry *res;
4545         unsigned long lock_flags = 0;
4546         ssize_t len;
4547
4548         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4549         res = (struct ipr_resource_entry *)sdev->hostdata;
4550         if (res) {
4551                 if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
4552                         res->raw_mode = simple_strtoul(buf, NULL, 10);
4553                         len = strlen(buf);
4554                         if (res->sdev)
4555                                 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4556                                         res->raw_mode ? "enabled" : "disabled");
4557                 } else
4558                         len = -EINVAL;
4559         } else
4560                 len = -ENXIO;
4561         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4562         return len;
4563 }
4564
4565 static struct device_attribute ipr_raw_mode_attr = {
4566         .attr = {
4567                 .name =         "raw_mode",
4568                 .mode =         S_IRUGO | S_IWUSR,
4569         },
4570         .show = ipr_show_raw_mode,
4571         .store = ipr_store_raw_mode
4572 };
4573
4574 static struct device_attribute *ipr_dev_attrs[] = {
4575         &ipr_adapter_handle_attr,
4576         &ipr_resource_path_attr,
4577         &ipr_device_id_attr,
4578         &ipr_resource_type_attr,
4579         &ipr_raw_mode_attr,
4580         NULL,
4581 };
4582
4583 /**
4584  * ipr_biosparam - Return the HSC mapping
4585  * @sdev:                       scsi device struct
4586  * @block_device:       block device pointer
4587  * @capacity:           capacity of the device
4588  * @parm:                       Array containing returned HSC values.
4589  *
4590  * This function generates the HSC parms that fdisk uses.
4591  * We want to make sure we return something that places partitions
4592  * on 4k boundaries for best performance with the IOA.
4593  *
4594  * Return value:
4595  *      0 on success
4596  **/
4597 static int ipr_biosparam(struct scsi_device *sdev,
4598                          struct block_device *block_device,
4599                          sector_t capacity, int *parm)
4600 {
4601         int heads, sectors;
4602         sector_t cylinders;
4603
4604         heads = 128;
4605         sectors = 32;
4606
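        /*
         * 128 heads * 32 sectors = 4096 sectors (2MB) per cylinder, so
         * partitions created on cylinder boundaries start 4k aligned,
         * which is what the IOA prefers.
         */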
4607         cylinders = capacity;
4608         sector_div(cylinders, (128 * 32));
4609
4610         /* return result */
4611         parm[0] = heads;
4612         parm[1] = sectors;
4613         parm[2] = cylinders;
4614
4615         return 0;
4616 }
4617
4618 /**
4619  * ipr_find_starget - Find target based on bus/target.
4620  * @starget:    scsi target struct
4621  *
4622  * Return value:
4623  *      resource entry pointer if found / NULL if not found
4624  **/
4625 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4626 {
4627         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4628         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4629         struct ipr_resource_entry *res;
4630
4631         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4632                 if ((res->bus == starget->channel) &&
4633                     (res->target == starget->id)) {
4634                         return res;
4635                 }
4636         }
4637
4638         return NULL;
4639 }
4640
4641 static struct ata_port_info sata_port_info;
4642
4643 /**
4644  * ipr_target_alloc - Prepare for commands to a SCSI target
4645  * @starget:    scsi target struct
4646  *
4647  * If the device is a SATA device, this function allocates an
4648  * ATA port with libata, else it does nothing.
4649  *
4650  * Return value:
4651  *      0 on success / non-0 on failure
4652  **/
4653 static int ipr_target_alloc(struct scsi_target *starget)
4654 {
4655         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4656         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4657         struct ipr_sata_port *sata_port;
4658         struct ata_port *ap;
4659         struct ipr_resource_entry *res;
4660         unsigned long lock_flags;
4661
4662         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4663         res = ipr_find_starget(starget);
4664         starget->hostdata = NULL;
4665
4666         if (res && ipr_is_gata(res)) {
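                /* Drop the host lock: the GFP_KERNEL allocations below may sleep. */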
4667                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4668                 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4669                 if (!sata_port)
4670                         return -ENOMEM;
4671
4672                 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4673                 if (ap) {
4674                         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4675                         sata_port->ioa_cfg = ioa_cfg;
4676                         sata_port->ap = ap;
4677                         sata_port->res = res;
4678
4679                         res->sata_port = sata_port;
4680                         ap->private_data = sata_port;
4681                         starget->hostdata = sata_port;
4682                 } else {
4683                         kfree(sata_port);
4684                         return -ENOMEM;
4685                 }
4686         }
4687         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4688
4689         return 0;
4690 }
4691
4692 /**
4693  * ipr_target_destroy - Destroy a SCSI target
4694  * @starget:    scsi target struct
4695  *
4696  * If the device was a SATA device, this function frees the libata
4697  * ATA port, else it does nothing.
4698  *
4699  **/
4700 static void ipr_target_destroy(struct scsi_target *starget)
4701 {
4702         struct ipr_sata_port *sata_port = starget->hostdata;
4703         struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4704         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4705
4706         if (ioa_cfg->sis64) {
4707                 if (!ipr_find_starget(starget)) {
4708                         if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4709                                 clear_bit(starget->id, ioa_cfg->array_ids);
4710                         else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4711                                 clear_bit(starget->id, ioa_cfg->vset_ids);
4712                         else if (starget->channel == 0)
4713                                 clear_bit(starget->id, ioa_cfg->target_ids);
4714                 }
4715         }
4716
4717         if (sata_port) {
4718                 starget->hostdata = NULL;
4719                 ata_sas_port_destroy(sata_port->ap);
4720                 kfree(sata_port);
4721         }
4722 }
4723
4724 /**
4725  * ipr_find_sdev - Find device based on bus/target/lun.
4726  * @sdev:       scsi device struct
4727  *
4728  * Return value:
4729  *      resource entry pointer if found / NULL if not found
4730  **/
4731 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4732 {
4733         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4734         struct ipr_resource_entry *res;
4735
4736         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4737                 if ((res->bus == sdev->channel) &&
4738                     (res->target == sdev->id) &&
4739                     (res->lun == sdev->lun))
4740                         return res;
4741         }
4742
4743         return NULL;
4744 }
4745
4746 /**
4747  * ipr_slave_destroy - Unconfigure a SCSI device
4748  * @sdev:       scsi device struct
4749  *
4750  * Return value:
4751  *      nothing
4752  **/
4753 static void ipr_slave_destroy(struct scsi_device *sdev)
4754 {
4755         struct ipr_resource_entry *res;
4756         struct ipr_ioa_cfg *ioa_cfg;
4757         unsigned long lock_flags = 0;
4758
4759         ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4760
4761         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4762         res = (struct ipr_resource_entry *) sdev->hostdata;
4763         if (res) {
4764                 if (res->sata_port)
4765                         res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4766                 sdev->hostdata = NULL;
4767                 res->sdev = NULL;
4768                 res->sata_port = NULL;
4769         }
4770         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4771 }
4772
4773 /**
4774  * ipr_slave_configure - Configure a SCSI device
4775  * @sdev:       scsi device struct
4776  *
4777  * This function configures the specified scsi device.
4778  *
4779  * Return value:
4780  *      0 on success
4781  **/
4782 static int ipr_slave_configure(struct scsi_device *sdev)
4783 {
4784         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4785         struct ipr_resource_entry *res;
4786         struct ata_port *ap = NULL;
4787         unsigned long lock_flags = 0;
4788         char buffer[IPR_MAX_RES_PATH_LENGTH];
4789
4790         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4791         res = sdev->hostdata;
4792         if (res) {
4793                 if (ipr_is_af_dasd_device(res))
4794                         sdev->type = TYPE_RAID;
4795                 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4796                         sdev->scsi_level = 4;
4797                         sdev->no_uld_attach = 1;
4798                 }
4799                 if (ipr_is_vset_device(res)) {
4800                         sdev->scsi_level = SCSI_SPC_3;
4801                         blk_queue_rq_timeout(sdev->request_queue,
4802                                              IPR_VSET_RW_TIMEOUT);
4803                         blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4804                 }
4805                 if (ipr_is_gata(res) && res->sata_port)
4806                         ap = res->sata_port->ap;
4807                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4808
4809                 if (ap) {
4810                         scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
4811                         ata_sas_slave_configure(sdev, ap);
4812                 }
4813
4814                 if (ioa_cfg->sis64)
4815                         sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4816                                     ipr_format_res_path(ioa_cfg,
4817                                 res->res_path, buffer, sizeof(buffer)));
4818                 return 0;
4819         }
4820         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4821         return 0;
4822 }
4823
4824 /**
4825  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4826  * @sdev:       scsi device struct
4827  *
4828  * This function initializes an ATA port so that future commands
4829  * sent through queuecommand will work.
4830  *
4831  * Return value:
4832  *      0 on success
4833  **/
4834 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4835 {
4836         struct ipr_sata_port *sata_port = NULL;
4837         int rc = -ENXIO;
4838
4839         ENTER;
4840         if (sdev->sdev_target)
4841                 sata_port = sdev->sdev_target->hostdata;
4842         if (sata_port) {
4843                 rc = ata_sas_port_init(sata_port->ap);
4844                 if (rc == 0)
4845                         rc = ata_sas_sync_probe(sata_port->ap);
4846         }
4847
4848         if (rc)
4849                 ipr_slave_destroy(sdev);
4850
4851         LEAVE;
4852         return rc;
4853 }
4854
4855 /**
4856  * ipr_slave_alloc - Prepare for commands to a device.
4857  * @sdev:       scsi device struct
4858  *
4859  * This function saves a pointer to the resource entry
4860  * in the scsi device struct if the device exists. We
4861  * can then use this pointer in ipr_queuecommand when
4862  * handling new commands.
4863  *
4864  * Return value:
4865  *      0 on success / -ENXIO if device does not exist
4866  **/
4867 static int ipr_slave_alloc(struct scsi_device *sdev)
4868 {
4869         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4870         struct ipr_resource_entry *res;
4871         unsigned long lock_flags;
4872         int rc = -ENXIO;
4873
4874         sdev->hostdata = NULL;
4875
4876         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877
4878         res = ipr_find_sdev(sdev);
4879         if (res) {
4880                 res->sdev = sdev;
4881                 res->add_to_ml = 0;
4882                 res->in_erp = 0;
4883                 sdev->hostdata = res;
4884                 if (!ipr_is_naca_model(res))
4885                         res->needs_sync_complete = 1;
4886                 rc = 0;
4887                 if (ipr_is_gata(res)) {
4888                         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4889                         return ipr_ata_slave_alloc(sdev);
4890                 }
4891         }
4892
4893         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4894
4895         return rc;
4896 }
4897
4898 /**
4899  * ipr_match_lun - Match function for specified LUN
4900  * @ipr_cmd:    ipr command struct
4901  * @device:             device to match (sdev)
4902  *
4903  * Returns:
4904  *      1 if command matches sdev / 0 if command does not match sdev
4905  **/
4906 static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4907 {
4908         if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4909                 return 1;
4910         return 0;
4911 }
4912
4913 /**
4914  * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:    ioa config struct
4916  * @device:             device to match (sdev)
4917  * @match:              match function to use
4918  *
4919  * Returns:
4920  *      SUCCESS / FAILED
4921  **/
4922 static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4923                             int (*match)(struct ipr_cmnd *, void *))
4924 {
4925         struct ipr_cmnd *ipr_cmd;
4926         int wait;
4927         unsigned long flags;
4928         struct ipr_hrr_queue *hrrq;
4929         signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4930         DECLARE_COMPLETION_ONSTACK(comp);
4931
4932         ENTER;
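        /*
         * Attach a completion to every matching outstanding command, then
         * sleep until they all finish. On timeout, detach the completions
         * again so a late completion cannot touch this stack frame.
         */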
4933         do {
4934                 wait = 0;
4935
4936                 for_each_hrrq(hrrq, ioa_cfg) {
4937                         spin_lock_irqsave(hrrq->lock, flags);
4938                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4939                                 if (match(ipr_cmd, device)) {
4940                                         ipr_cmd->eh_comp = &comp;
4941                                         wait++;
4942                                 }
4943                         }
4944                         spin_unlock_irqrestore(hrrq->lock, flags);
4945                 }
4946
4947                 if (wait) {
4948                         timeout = wait_for_completion_timeout(&comp, timeout);
4949
4950                         if (!timeout) {
4951                                 wait = 0;
4952
4953                                 for_each_hrrq(hrrq, ioa_cfg) {
4954                                         spin_lock_irqsave(hrrq->lock, flags);
4955                                         list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
4956                                                 if (match(ipr_cmd, device)) {
4957                                                         ipr_cmd->eh_comp = NULL;
4958                                                         wait++;
4959                                                 }
4960                                         }
4961                                         spin_unlock_irqrestore(hrrq->lock, flags);
4962                                 }
4963
4964                                 if (wait)
4965                                         dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4966                                 LEAVE;
4967                                 return wait ? FAILED : SUCCESS;
4968                         }
4969                 }
4970         } while (wait);
4971
4972         LEAVE;
4973         return SUCCESS;
4974 }
4975
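/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:        scsi command struct
 *
 * Return value:
 *      SUCCESS / FAILED
 **/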
4976 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4977 {
4978         struct ipr_ioa_cfg *ioa_cfg;
4979         unsigned long lock_flags = 0;
4980         int rc = SUCCESS;
4981
4982         ENTER;
4983         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4984         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4985
4986         if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4987                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4988                 dev_err(&ioa_cfg->pdev->dev,
4989                         "Adapter being reset as a result of error recovery.\n");
4990
4991                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4992                         ioa_cfg->sdt_state = GET_DUMP;
4993         }
4994
4995         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4996         wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4997         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4998
        /* If we got hit with a host reset while we were already resetting
         * the adapter for some reason, and that reset failed, fail the
         * host reset as well.
         */
5001         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5002                 ipr_trace;
5003                 rc = FAILED;
5004         }
5005
5006         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5007         LEAVE;
5008         return rc;
5009 }
5010
5011 /**
5012  * ipr_device_reset - Reset the device
5013  * @ioa_cfg:    ioa config struct
5014  * @res:                resource entry struct
5015  *
5016  * This function issues a device reset to the affected device.
5017  * If the device is a SCSI device, a LUN reset will be sent
5018  * to the device first. If that does not work, a target reset
5019  * will be sent. If the device is a SATA device, a PHY reset will
5020  * be sent.
5021  *
5022  * Return value:
5023  *      0 on success / non-zero on failure
5024  **/
5025 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5026                             struct ipr_resource_entry *res)
5027 {
5028         struct ipr_cmnd *ipr_cmd;
5029         struct ipr_ioarcb *ioarcb;
5030         struct ipr_cmd_pkt *cmd_pkt;
5031         struct ipr_ioarcb_ata_regs *regs;
5032         u32 ioasc;
5033
5034         ENTER;
5035         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5036         ioarcb = &ipr_cmd->ioarcb;
5037         cmd_pkt = &ioarcb->cmd_pkt;
5038
5039         if (ipr_cmd->ioa_cfg->sis64) {
5040                 regs = &ipr_cmd->i.ata_ioadl.regs;
5041                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5042         } else
5043                 regs = &ioarcb->u.add_data.u.regs;
5044
5045         ioarcb->res_handle = res->res_handle;
5046         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5047         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5048         if (ipr_is_gata(res)) {
5049                 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
5050                 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
5051                 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5052         }
5053
5054         ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5055         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5056         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5057         if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5058                 if (ipr_cmd->ioa_cfg->sis64)
5059                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5060                                sizeof(struct ipr_ioasa_gata));
5061                 else
5062                         memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5063                                sizeof(struct ipr_ioasa_gata));
5064         }
5065
5066         LEAVE;
5067         return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5068 }
5069
5070 /**
5071  * ipr_sata_reset - Reset the SATA port
5072  * @link:       SATA link to reset
 * @classes:    class of the attached device
 * @deadline:   unused
5074  *
5075  * This function issues a SATA phy reset to the affected ATA link.
5076  *
5077  * Return value:
5078  *      0 on success / non-zero on failure
5079  **/
5080 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
5081                                 unsigned long deadline)
5082 {
5083         struct ipr_sata_port *sata_port = link->ap->private_data;
5084         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5085         struct ipr_resource_entry *res;
5086         unsigned long lock_flags = 0;
5087         int rc = -ENXIO;
5088
5089         ENTER;
5090         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5091         while (ioa_cfg->in_reset_reload) {
5092                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5093                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5094                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5095         }
5096
5097         res = sata_port->res;
5098         if (res) {
5099                 rc = ipr_device_reset(ioa_cfg, res);
5100                 *classes = res->ata_class;
5101         }
5102
5103         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5104         LEAVE;
5105         return rc;
5106 }
5107
5108 /**
 * __ipr_eh_dev_reset - Reset the device
5110  * @scsi_cmd:   scsi command struct
5111  *
5112  * This function issues a device reset to the affected device.
5113  * A LUN reset will be sent to the device first. If that does
5114  * not work, a target reset will be sent.
5115  *
5116  * Return value:
5117  *      SUCCESS / FAILED
5118  **/
5119 static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5120 {
5121         struct ipr_cmnd *ipr_cmd;
5122         struct ipr_ioa_cfg *ioa_cfg;
5123         struct ipr_resource_entry *res;
5124         struct ata_port *ap;
5125         int rc = 0;
5126         struct ipr_hrr_queue *hrrq;
5127
5128         ENTER;
5129         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5130         res = scsi_cmd->device->hostdata;
5131
5132         if (!res)
5133                 return FAILED;
5134
5135         /*
5136          * If we are currently going through reset/reload, return failed. This will force the
5137          * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5138          * reset to complete
5139          */
5140         if (ioa_cfg->in_reset_reload)
5141                 return FAILED;
5142         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5143                 return FAILED;
5144
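        /*
         * Retarget the done handler of every command still pending against
         * this device so completions that race with the reset go through
         * the SCSI/SATA error-handling done functions.
         */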
5145         for_each_hrrq(hrrq, ioa_cfg) {
5146                 spin_lock(&hrrq->_lock);
5147                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5148                         if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
5149                                 if (ipr_cmd->scsi_cmd)
5150                                         ipr_cmd->done = ipr_scsi_eh_done;
5151                                 if (ipr_cmd->qc)
5152                                         ipr_cmd->done = ipr_sata_eh_done;
5153                                 if (ipr_cmd->qc &&
5154                                     !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
5155                                         ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5156                                         ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5157                                 }
5158                         }
5159                 }
5160                 spin_unlock(&hrrq->_lock);
5161         }
5162         res->resetting_device = 1;
5163         scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5164
5165         if (ipr_is_gata(res) && res->sata_port) {
5166                 ap = res->sata_port->ap;
5167                 spin_unlock_irq(scsi_cmd->device->host->host_lock);
5168                 ata_std_error_handler(ap);
5169                 spin_lock_irq(scsi_cmd->device->host->host_lock);
5170
5171                 for_each_hrrq(hrrq, ioa_cfg) {
5172                         spin_lock(&hrrq->_lock);
5173                         list_for_each_entry(ipr_cmd,
5174                                             &hrrq->hrrq_pending_q, queue) {
5175                                 if (ipr_cmd->ioarcb.res_handle ==
5176                                     res->res_handle) {
5177                                         rc = -EIO;
5178                                         break;
5179                                 }
5180                         }
5181                         spin_unlock(&hrrq->_lock);
5182                 }
5183         } else
5184                 rc = ipr_device_reset(ioa_cfg, res);
5185         res->resetting_device = 0;
5186         res->reset_occurred = 1;
5187
5188         LEAVE;
5189         return rc ? FAILED : SUCCESS;
5190 }
5191
5192 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5193 {
5194         int rc;
5195         struct ipr_ioa_cfg *ioa_cfg;
5196
5197         ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5198
5199         spin_lock_irq(cmd->device->host->host_lock);
5200         rc = __ipr_eh_dev_reset(cmd);
5201         spin_unlock_irq(cmd->device->host->host_lock);
5202
5203         if (rc == SUCCESS)
5204                 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5205
5206         return rc;
5207 }
5208
5209 /**
5210  * ipr_bus_reset_done - Op done function for bus reset.
5211  * @ipr_cmd:    ipr command struct
5212  *
5213  * This function is the op done function for a bus reset
5214  *
5215  * Return value:
5216  *      none
5217  **/
5218 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5219 {
5220         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5221         struct ipr_resource_entry *res;
5222
5223         ENTER;
5224         if (!ioa_cfg->sis64)
5225                 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5226                         if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5227                                 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5228                                 break;
5229                         }
5230                 }
5231
5232         /*
5233          * If abort has not completed, indicate the reset has, else call the
5234          * abort's done function to wake the sleeping eh thread
5235          */
5236         if (ipr_cmd->sibling->sibling)
5237                 ipr_cmd->sibling->sibling = NULL;
5238         else
5239                 ipr_cmd->sibling->done(ipr_cmd->sibling);
5240
5241         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5242         LEAVE;
5243 }
5244
5245 /**
5246  * ipr_abort_timeout - An abort task has timed out
5247  * @ipr_cmd:    ipr command struct
5248  *
5249  * This function handles when an abort task times out. If this
5250  * happens we issue a bus reset since we have resources tied
5251  * up that must be freed before returning to the midlayer.
5252  *
5253  * Return value:
5254  *      none
5255  **/
5256 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
5257 {
5258         struct ipr_cmnd *reset_cmd;
5259         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5260         struct ipr_cmd_pkt *cmd_pkt;
5261         unsigned long lock_flags = 0;
5262
5263         ENTER;
5264         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5265         if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5266                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5267                 return;
5268         }
5269
5270         sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
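        /*
         * Link the abort op and the bus reset through their sibling
         * pointers so ipr_bus_reset_done() can tell whether the abort has
         * already completed and wake the sleeping eh thread exactly once.
         */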
5271         reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5272         ipr_cmd->sibling = reset_cmd;
5273         reset_cmd->sibling = ipr_cmd;
5274         reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5275         cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5276         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5277         cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5278         cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5279
5280         ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5281         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5282         LEAVE;
5283 }
5284
5285 /**
5286  * ipr_cancel_op - Cancel specified op
5287  * @scsi_cmd:   scsi command struct
5288  *
5289  * This function cancels specified op.
5290  *
5291  * Return value:
5292  *      SUCCESS / FAILED
5293  **/
5294 static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5295 {
5296         struct ipr_cmnd *ipr_cmd;
5297         struct ipr_ioa_cfg *ioa_cfg;
5298         struct ipr_resource_entry *res;
5299         struct ipr_cmd_pkt *cmd_pkt;
5300         u32 ioasc, int_reg;
5301         int op_found = 0;
5302         struct ipr_hrr_queue *hrrq;
5303
5304         ENTER;
5305         ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5306         res = scsi_cmd->device->hostdata;
5307
5308         /* If we are currently going through reset/reload, return failed.
5309          * This will force the mid-layer to call ipr_eh_host_reset,
5310          * which will then go to sleep and wait for the reset to complete
5311          */
5312         if (ioa_cfg->in_reset_reload ||
5313             ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5314                 return FAILED;
5315         if (!res)
5316                 return FAILED;
5317
5318         /*
         * If we are aborting a timed out op, chances are that the timeout was
         * caused by an EEH error that has not yet been detected. In that case,
         * reading a register will trigger the EEH recovery infrastructure.
5322          */
5323         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5324
5325         if (!ipr_is_gscsi(res))
5326                 return FAILED;
5327
5328         for_each_hrrq(hrrq, ioa_cfg) {
5329                 spin_lock(&hrrq->_lock);
5330                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
5331                         if (ipr_cmd->scsi_cmd == scsi_cmd) {
5332                                 ipr_cmd->done = ipr_scsi_eh_done;
5333                                 op_found = 1;
5334                                 break;
5335                         }
5336                 }
5337                 spin_unlock(&hrrq->_lock);
5338         }
5339
5340         if (!op_found)
5341                 return SUCCESS;
5342
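        /*
         * The op is still outstanding on the adapter, so issue a Cancel All
         * Requests to the device and wait for the adapter's response.
         */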
5343         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5344         ipr_cmd->ioarcb.res_handle = res->res_handle;
5345         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5346         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5347         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5348         ipr_cmd->u.sdev = scsi_cmd->device;
5349
5350         scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5351                     scsi_cmd->cmnd[0]);
5352         ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5353         ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5354
5355         /*
         * If the abort task timed out and we sent a bus reset, we will get
         * one of the following responses to the abort
5358          */
5359         if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5360                 ioasc = 0;
5361                 ipr_trace;
5362         }
5363
5364         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5365         if (!ipr_is_naca_model(res))
5366                 res->needs_sync_complete = 1;
5367
5368         LEAVE;
5369         return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5370 }
5371
5372 /**
 * ipr_scan_finished - Report whether the device scan is complete
 * @shost:              scsi host struct
 * @elapsed_time:       elapsed time of the scan, in jiffies
5375  *
5376  * Return value:
5377  *      0 if scan in progress / 1 if scan is complete
5378  **/
5379 static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5380 {
5381         unsigned long lock_flags;
5382         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5383         int rc = 0;
5384
5385         spin_lock_irqsave(shost->host_lock, lock_flags);
5386         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5387                 rc = 1;
5388         if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5389                 rc = 1;
5390         spin_unlock_irqrestore(shost->host_lock, lock_flags);
5391         return rc;
5392 }
5393
5394 /**
 * ipr_eh_abort - Abort a single op
5396  * @scsi_cmd:   scsi command struct
5397  *
5398  * Return value:
5399  *      SUCCESS / FAILED
5400  **/
5401 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5402 {
5403         unsigned long flags;
5404         int rc;
5405         struct ipr_ioa_cfg *ioa_cfg;
5406
5407         ENTER;
5408
5409         ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5410
5411         spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5412         rc = ipr_cancel_op(scsi_cmd);
5413         spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5414
5415         if (rc == SUCCESS)
5416                 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5417         LEAVE;
5418         return rc;
5419 }
5420
5421 /**
5422  * ipr_handle_other_interrupt - Handle "other" interrupts
5423  * @ioa_cfg:    ioa config struct
5424  * @int_reg:    interrupt register
5425  *
5426  * Return value:
5427  *      IRQ_NONE / IRQ_HANDLED
5428  **/
5429 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5430                                               u32 int_reg)
5431 {
5432         irqreturn_t rc = IRQ_HANDLED;
5433         u32 int_mask_reg;
5434
5435         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5436         int_reg &= ~int_mask_reg;
5437
        /* If no operational interrupt is pending on the adapter, ignore it,
         * except on SIS64, where a stage change interrupt may be pending
         * and must be handled here.
         */
5441         if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5442                 if (ioa_cfg->sis64) {
5443                         int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5444                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5445                         if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5446
5447                                 /* clear stage change */
5448                                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5449                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5450                                 list_del(&ioa_cfg->reset_cmd->queue);
5451                                 del_timer(&ioa_cfg->reset_cmd->timer);
5452                                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5453                                 return IRQ_HANDLED;
5454                         }
5455                 }
5456
5457                 return IRQ_NONE;
5458         }
5459
5460         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5461                 /* Mask the interrupt */
5462                 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5463                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5464
5465                 list_del(&ioa_cfg->reset_cmd->queue);
5466                 del_timer(&ioa_cfg->reset_cmd->timer);
5467                 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5468         } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
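                /*
                 * HRRQ updated is the only bit set: a normal command
                 * completion raced with this slow path. Acknowledge it here
                 * only on adapters that require explicit interrupt clearing.
                 */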
5469                 if (ioa_cfg->clear_isr) {
5470                         if (ipr_debug && printk_ratelimit())
5471                                 dev_err(&ioa_cfg->pdev->dev,
5472                                         "Spurious interrupt detected. 0x%08X\n", int_reg);
5473                         writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5474                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5475                         return IRQ_NONE;
5476                 }
5477         } else {
5478                 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5479                         ioa_cfg->ioa_unit_checked = 1;
5480                 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5481                         dev_err(&ioa_cfg->pdev->dev,
5482                                 "No Host RRQ. 0x%08X\n", int_reg);
5483                 else
5484                         dev_err(&ioa_cfg->pdev->dev,
5485                                 "Permanent IOA failure. 0x%08X\n", int_reg);
5486
5487                 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5488                         ioa_cfg->sdt_state = GET_DUMP;
5489
5490                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5491                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5492         }
5493
5494         return rc;
5495 }
5496
5497 /**
5498  * ipr_isr_eh - Interrupt service routine error handler
5499  * @ioa_cfg:    ioa config struct
 * @msg:        message to log
 * @number:     number to log along with the message
5501  *
5502  * Return value:
5503  *      none
5504  **/
5505 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5506 {
5507         ioa_cfg->errors_logged++;
5508         dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5509
5510         if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5511                 ioa_cfg->sdt_state = GET_DUMP;
5512
5513         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5514 }
5515
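/**
 * ipr_process_hrrq - Process responses from an HRR queue
 * @hrr_queue:  hrr queue to process
 * @budget:     maximum number of responses to process, or -1 for no limit
 * @doneq:      list on which to collect the completed commands
 *
 * Return value:
 *      number of responses processed
 **/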
5516 static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5517                                                 struct list_head *doneq)
5518 {
5519         u32 ioasc;
5520         u16 cmd_index;
5521         struct ipr_cmnd *ipr_cmd;
5522         struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5523         int num_hrrq = 0;
5524
5525         /* If interrupts are disabled, ignore the interrupt */
5526         if (!hrr_queue->allow_interrupts)
5527                 return 0;
5528
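        /*
         * New responses are detected via a toggle bit: the adapter flips
         * the bit it writes on each pass through the circular queue, so an
         * entry whose toggle bit matches ours is valid and unconsumed.
         */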
5529         while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5530                hrr_queue->toggle_bit) {
5531
5532                 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5533                              IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5534                              IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5535
5536                 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5537                              cmd_index < hrr_queue->min_cmd_id)) {
5538                         ipr_isr_eh(ioa_cfg,
5539                                 "Invalid response handle from IOA: ",
5540                                 cmd_index);
5541                         break;
5542                 }
5543
5544                 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5545                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5546
5547                 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5548
5549                 list_move_tail(&ipr_cmd->queue, doneq);
5550
5551                 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5552                         hrr_queue->hrrq_curr++;
5553                 } else {
5554                         hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5555                         hrr_queue->toggle_bit ^= 1u;
5556                 }
5557                 num_hrrq++;
5558                 if (budget > 0 && num_hrrq >= budget)
5559                         break;
5560         }
5561
5562         return num_hrrq;
5563 }
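
/*
 * Illustrative sketch (annotation, not part of ipr.c): the toggle-bit
 * ring walk performed by ipr_process_hrrq() above. An entry belongs to
 * the host while its toggle bit matches the expected value; on wrap,
 * the expected value flips. Names below are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_consume_ring(__be32 *curr, __be32 *start, __be32 *end,
				u32 toggle)
{
	int handled = 0;

	while ((be32_to_cpu(*curr) & IPR_HRRQ_TOGGLE_BIT) == toggle) {
		handled++;
		if (curr < end)
			curr++;			/* advance within the ring */
		else {
			curr = start;		/* wrap to the start... */
			toggle ^= 1u;		/* ...and flip the bit */
		}
	}
	return handled;
}
#endif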
5564
5565 static int ipr_iopoll(struct blk_iopoll *iop, int budget)
5566 {
5567         struct ipr_ioa_cfg *ioa_cfg;
5568         struct ipr_hrr_queue *hrrq;
5569         struct ipr_cmnd *ipr_cmd, *temp;
5570         unsigned long hrrq_flags;
5571         int completed_ops;
5572         LIST_HEAD(doneq);
5573
5574         hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5575         ioa_cfg = hrrq->ioa_cfg;
5576
5577         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5578         completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5579
5580         if (completed_ops < budget)
5581                 blk_iopoll_complete(iop);
5582         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5583
5584         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5585                 list_del(&ipr_cmd->queue);
5586                 del_timer(&ipr_cmd->timer);
5587                 ipr_cmd->fast_done(ipr_cmd);
5588         }
5589
5590         return completed_ops;
5591 }
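
/*
 * Illustrative sketch (annotation, not part of ipr.c): the usual
 * blk_iopoll life cycle around a poll callback like ipr_iopoll()
 * above. In this driver the init/enable happens once per HRRQ; the
 * helper name below is hypothetical.
 */
#if 0	/* example only -- never compiled */
static void example_iopoll_setup(struct ipr_hrr_queue *hrrq, int weight)
{
	/* register the poll handler with its per-pass budget (weight) */
	blk_iopoll_init(&hrrq->iopoll, weight, ipr_iopoll);
	/* arm it; the ISR then defers completion work to the poller via
	 * blk_iopoll_sched_prep()/blk_iopoll_sched() (see ipr_isr_mhrrq) */
	blk_iopoll_enable(&hrrq->iopoll);
}
#endif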
5592
5593 /**
5594  * ipr_isr - Interrupt service routine
5595  * @irq:        irq number
5596  * @devp:       pointer to hrr queue struct
5597  *
5598  * Return value:
5599  *      IRQ_NONE / IRQ_HANDLED
5600  **/
5601 static irqreturn_t ipr_isr(int irq, void *devp)
5602 {
5603         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5604         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5605         unsigned long hrrq_flags = 0;
5606         u32 int_reg = 0;
5607         int num_hrrq = 0;
5608         int irq_none = 0;
5609         struct ipr_cmnd *ipr_cmd, *temp;
5610         irqreturn_t rc = IRQ_NONE;
5611         LIST_HEAD(doneq);
5612
5613         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5614         /* If interrupts are disabled, ignore the interrupt */
5615         if (!hrrq->allow_interrupts) {
5616                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5617                 return IRQ_NONE;
5618         }
5619
5620         while (1) {
5621                 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5622                 rc = IRQ_HANDLED;
5623
5624                         if (!ioa_cfg->clear_isr)
5625                                 break;
5626
5627                         /* Clear the PCI interrupt */
5628                         num_hrrq = 0;
5629                         do {
5630                                 writel(IPR_PCII_HRRQ_UPDATED,
5631                                      ioa_cfg->regs.clr_interrupt_reg32);
5632                                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5633                         } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5634                                 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5635
5636                 } else if (rc == IRQ_NONE && irq_none == 0) {
5637                         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5638                         irq_none++;
5639                 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5640                            int_reg & IPR_PCII_HRRQ_UPDATED) {
5641                         ipr_isr_eh(ioa_cfg,
5642                                 "Error clearing HRRQ: ", num_hrrq);
5643                         rc = IRQ_HANDLED;
5644                         break;
5645                 } else
5646                         break;
5647         }
5648
5649         if (unlikely(rc == IRQ_NONE))
5650                 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5651
5652         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5653         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5654                 list_del(&ipr_cmd->queue);
5655                 del_timer(&ipr_cmd->timer);
5656                 ipr_cmd->fast_done(ipr_cmd);
5657         }
5658         return rc;
5659 }
5660
5661 /**
5662  * ipr_isr_mhrrq - Interrupt service routine for secondary HRRQ vectors
5663  * @irq:        irq number
5664  * @devp:       pointer to hrr queue struct
5665  *
5666  * Return value:
5667  *      IRQ_NONE / IRQ_HANDLED
5668  **/
5669 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5670 {
5671         struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5672         struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5673         unsigned long hrrq_flags = 0;
5674         struct ipr_cmnd *ipr_cmd, *temp;
5675         irqreturn_t rc = IRQ_NONE;
5676         LIST_HEAD(doneq);
5677
5678         spin_lock_irqsave(hrrq->lock, hrrq_flags);
5679
5680         /* If interrupts are disabled, ignore the interrupt */
5681         if (!hrrq->allow_interrupts) {
5682                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5683                 return IRQ_NONE;
5684         }
5685
5686         if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5687                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5688                        hrrq->toggle_bit) {
5689                         if (!blk_iopoll_sched_prep(&hrrq->iopoll))
5690                                 blk_iopoll_sched(&hrrq->iopoll);
5691                         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5692                         return IRQ_HANDLED;
5693                 }
5694         } else {
5695                 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5696                         hrrq->toggle_bit)
5697
5698                         if (ipr_process_hrrq(hrrq, -1, &doneq))
5699                                 rc = IRQ_HANDLED;
5700         }
5701
5702         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5703
5704         list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5705                 list_del(&ipr_cmd->queue);
5706                 del_timer(&ipr_cmd->timer);
5707                 ipr_cmd->fast_done(ipr_cmd);
5708         }
5709         return rc;
5710 }
5711
5712 /**
5713  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5714  * @ioa_cfg:    ioa config struct
5715  * @ipr_cmd:    ipr command struct
5716  *
5717  * Return value:
5718  *      0 on success / -1 on failure
5719  **/
5720 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5721                              struct ipr_cmnd *ipr_cmd)
5722 {
5723         int i, nseg;
5724         struct scatterlist *sg;
5725         u32 length;
5726         u32 ioadl_flags = 0;
5727         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5728         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5729         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5730
5731         length = scsi_bufflen(scsi_cmd);
5732         if (!length)
5733                 return 0;
5734
5735         nseg = scsi_dma_map(scsi_cmd);
5736         if (nseg < 0) {
5737                 if (printk_ratelimit())
5738                         dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5739                 return -1;
5740         }
5741
5742         ipr_cmd->dma_use_sg = nseg;
5743
5744         ioarcb->data_transfer_length = cpu_to_be32(length);
5745         ioarcb->ioadl_len =
5746                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5747
5748         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5749                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5750                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5751         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5752                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5753
5754         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5755                 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5756                 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5757                 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5758         }
5759
5760         ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5761         return 0;
5762 }
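
/*
 * Illustrative sketch (annotation, not part of ipr.c): the generic
 * shape of what ipr_build_ioadl64() does above -- DMA-map the
 * mid-layer buffer, then translate each segment into one adapter
 * descriptor. Names below are hypothetical.
 */
#if 0	/* example only -- never compiled */
static int example_map_sg(struct scsi_cmnd *cmd,
			  struct ipr_ioadl64_desc *desc)
{
	struct scatterlist *sg;
	int i, nseg = scsi_dma_map(cmd);	/* map the S/G list for DMA */

	if (nseg <= 0)
		return nseg;			/* nothing mapped, or error */

	scsi_for_each_sg(cmd, sg, nseg, i) {	/* one descriptor per segment */
		desc[i].data_len = cpu_to_be32(sg_dma_len(sg));
		desc[i].address = cpu_to_be64(sg_dma_address(sg));
	}
	return 0;
}
#endif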
5763
5764 /**
5765  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5766  * @ioa_cfg:    ioa config struct
5767  * @ipr_cmd:    ipr command struct
5768  *
5769  * Return value:
5770  *      0 on success / -1 on failure
5771  **/
5772 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5773                            struct ipr_cmnd *ipr_cmd)
5774 {
5775         int i, nseg;
5776         struct scatterlist *sg;
5777         u32 length;
5778         u32 ioadl_flags = 0;
5779         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5780         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5781         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5782
5783         length = scsi_bufflen(scsi_cmd);
5784         if (!length)
5785                 return 0;
5786
5787         nseg = scsi_dma_map(scsi_cmd);
5788         if (nseg < 0) {
5789                 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5790                 return -1;
5791         }
5792
5793         ipr_cmd->dma_use_sg = nseg;
5794
5795         if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5796                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5797                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5798                 ioarcb->data_transfer_length = cpu_to_be32(length);
5799                 ioarcb->ioadl_len =
5800                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5801         } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5802                 ioadl_flags = IPR_IOADL_FLAGS_READ;
5803                 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5804                 ioarcb->read_ioadl_len =
5805                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5806         }
5807
5808         if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5809                 ioadl = ioarcb->u.add_data.u.ioadl;
5810                 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5811                                     offsetof(struct ipr_ioarcb, u.add_data));
5812                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5813         }
5814
5815         scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5816                 ioadl[i].flags_and_data_len =
5817                         cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5818                 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5819         }
5820
5821         ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5822         return 0;
5823 }
5824
5825 /**
5826  * ipr_erp_done - Process completion of ERP for a device
5827  * @ipr_cmd:            ipr command struct
5828  *
5829  * This function copies the sense buffer into the scsi_cmd
5830  * struct and invokes the scsi_done function.
5831  *
5832  * Return value:
5833  *      nothing
5834  **/
5835 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5836 {
5837         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5838         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5839         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5840
5841         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5842                 scsi_cmd->result |= (DID_ERROR << 16);
5843                 scmd_printk(KERN_ERR, scsi_cmd,
5844                             "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5845         } else {
5846                 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5847                        SCSI_SENSE_BUFFERSIZE);
5848         }
5849
5850         if (res) {
5851                 if (!ipr_is_naca_model(res))
5852                         res->needs_sync_complete = 1;
5853                 res->in_erp = 0;
5854         }
5855         scsi_dma_unmap(ipr_cmd->scsi_cmd);
5856         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5857         scsi_cmd->scsi_done(scsi_cmd);
5858 }
5859
5860 /**
5861  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5862  * @ipr_cmd:    ipr command struct
5863  *
5864  * Return value:
5865  *      none
5866  **/
5867 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5868 {
5869         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5870         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5871         dma_addr_t dma_addr = ipr_cmd->dma_addr;
5872
5873         memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5874         ioarcb->data_transfer_length = 0;
5875         ioarcb->read_data_transfer_length = 0;
5876         ioarcb->ioadl_len = 0;
5877         ioarcb->read_ioadl_len = 0;
5878         ioasa->hdr.ioasc = 0;
5879         ioasa->hdr.residual_data_len = 0;
5880
5881         if (ipr_cmd->ioa_cfg->sis64)
5882                 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5883                         cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5884         else {
5885                 ioarcb->write_ioadl_addr =
5886                         cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5887                 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5888         }
5889 }
5890
5891 /**
5892  * ipr_erp_request_sense - Send request sense to a device
5893  * @ipr_cmd:    ipr command struct
5894  *
5895  * This function sends a request sense to a device as a result
5896  * of a check condition.
5897  *
5898  * Return value:
5899  *      nothing
5900  **/
5901 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5902 {
5903         struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5904         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5905
5906         if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5907                 ipr_erp_done(ipr_cmd);
5908                 return;
5909         }
5910
5911         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5912
5913         cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5914         cmd_pkt->cdb[0] = REQUEST_SENSE;
5915         cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5916         cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5917         cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5918         cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5919
5920         ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5921                        SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5922
5923         ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5924                    IPR_REQUEST_SENSE_TIMEOUT * 2);
5925 }
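
/*
 * Illustrative sketch (annotation, not part of ipr.c): the 6-byte
 * REQUEST SENSE CDB built above, per SPC -- opcode 0x03 in byte 0,
 * allocation length in byte 4, remaining bytes zero here.
 */
#if 0	/* example only -- never compiled */
static void example_request_sense_cdb(u8 cdb[6], u8 alloc_len)
{
	memset(cdb, 0, 6);
	cdb[0] = REQUEST_SENSE;		/* 0x03 */
	cdb[4] = alloc_len;		/* e.g. SCSI_SENSE_BUFFERSIZE */
}
#endif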
5926
5927 /**
5928  * ipr_erp_cancel_all - Send cancel all to a device
5929  * @ipr_cmd:    ipr command struct
5930  *
5931  * This function sends a cancel all to a device to clear the
5932  * queue. If we are running TCQ on the device, QERR is set to 1,
5933  * which means all outstanding ops have been dropped on the floor.
5934  * Cancel all will return them to us.
5935  *
5936  * Return value:
5937  *      nothing
5938  **/
5939 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5940 {
5941         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5942         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5943         struct ipr_cmd_pkt *cmd_pkt;
5944
5945         res->in_erp = 1;
5946
5947         ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5948
5949         if (!scsi_cmd->device->simple_tags) {
5950                 ipr_erp_request_sense(ipr_cmd);
5951                 return;
5952         }
5953
5954         cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5955         cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5956         cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5957
5958         ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5959                    IPR_CANCEL_ALL_TIMEOUT);
5960 }
5961
5962 /**
5963  * ipr_dump_ioasa - Dump contents of IOASA
5964  * @ioa_cfg:    ioa config struct
5965  * @ipr_cmd:    ipr command struct
5966  * @res:                resource entry struct
5967  *
5968  * This function is invoked by the interrupt handler when ops
5969  * fail. It will log the IOASA if appropriate. Only called
5970  * for GPDD ops.
5971  *
5972  * Return value:
5973  *      none
5974  **/
5975 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5976                            struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5977 {
5978         int i;
5979         u16 data_len;
5980         u32 ioasc, fd_ioasc;
5981         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5982         __be32 *ioasa_data = (__be32 *)ioasa;
5983         int error_index;
5984
5985         ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5986         fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5987
5988         if (0 == ioasc)
5989                 return;
5990
5991         if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5992                 return;
5993
5994         if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5995                 error_index = ipr_get_error(fd_ioasc);
5996         else
5997                 error_index = ipr_get_error(ioasc);
5998
5999         if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6000                 /* Don't log an error if the IOA already logged one */
6001                 if (ioasa->hdr.ilid != 0)
6002                         return;
6003
6004                 if (!ipr_is_gscsi(res))
6005                         return;
6006
6007                 if (ipr_error_table[error_index].log_ioasa == 0)
6008                         return;
6009         }
6010
6011         ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
6012
6013         data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6014         if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6015                 data_len = sizeof(struct ipr_ioasa64);
6016         else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
6017                 data_len = sizeof(struct ipr_ioasa);
6018
6019         ipr_err("IOASA Dump:\n");
6020
6021         for (i = 0; i < data_len / 4; i += 4) {
6022                 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6023                         be32_to_cpu(ioasa_data[i]),
6024                         be32_to_cpu(ioasa_data[i+1]),
6025                         be32_to_cpu(ioasa_data[i+2]),
6026                         be32_to_cpu(ioasa_data[i+3]));
6027         }
6028 }
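
/*
 * Annotation (not part of ipr.c): the loop above renders the IOASA as
 * rows of four big-endian words behind a byte offset, e.g. (values
 * hypothetical):
 *
 *	IOASA Dump:
 *	00000000: 04448500 00000020 00000000 00000000
 *	00000010: 00000000 00000000 00000000 00000000
 */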
6029
6030 /**
6031  * ipr_gen_sense - Generate SCSI sense data from an IOASA
6032  * @ipr_cmd:    ipr command struct
6034  *
6035  * Return value:
6036  *      none
6037  **/
6038 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6039 {
6040         u32 failing_lba;
6041         u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6042         struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
6043         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6044         u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
6045
6046         memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6047
6048         if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6049                 return;
6050
6051         ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6052
6053         if (ipr_is_vset_device(res) &&
6054             ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6055             ioasa->u.vset.failing_lba_hi != 0) {
6056                 sense_buf[0] = 0x72;
6057                 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6058                 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6059                 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6060
6061                 sense_buf[7] = 12;
6062                 sense_buf[8] = 0;
6063                 sense_buf[9] = 0x0A;
6064                 sense_buf[10] = 0x80;
6065
6066                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6067
6068                 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6069                 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6070                 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6071                 sense_buf[15] = failing_lba & 0x000000ff;
6072
6073                 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6074
6075                 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6076                 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6077                 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6078                 sense_buf[19] = failing_lba & 0x000000ff;
6079         } else {
6080                 sense_buf[0] = 0x70;
6081                 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6082                 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6083                 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6084
6085                 /* Illegal request */
6086                 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6087                     (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6088                         sense_buf[7] = 10;      /* additional length */
6089
6090                         /* IOARCB was in error */
6091                         if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6092                                 sense_buf[15] = 0xC0;
6093                         else    /* Parameter data was invalid */
6094                                 sense_buf[15] = 0x80;
6095
6096                         sense_buf[16] =
6097                             ((IPR_FIELD_POINTER_MASK &
6098                               be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6099                         sense_buf[17] =
6100                             (IPR_FIELD_POINTER_MASK &
6101                              be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6102                 } else {
6103                         if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6104                                 if (ipr_is_vset_device(res))
6105                                         failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6106                                 else
6107                                         failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6108
6109                                 sense_buf[0] |= 0x80;   /* Or in the Valid bit */
6110                                 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6111                                 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6112                                 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6113                                 sense_buf[6] = failing_lba & 0x000000ff;
6114                         }
6115
6116                         sense_buf[7] = 6;       /* additional length */
6117                 }
6118         }
6119 }
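
/*
 * Annotation (not part of ipr.c): ipr_gen_sense() above picks between
 * the two SPC sense formats -- descriptor format (response code 0x72;
 * key/ASC/ASCQ in bytes 1-3 plus an information descriptor carrying
 * the 64-bit failing LBA) when the LBA overflows 32 bits, and fixed
 * format (response code 0x70; key in byte 2, ASC/ASCQ in bytes 12-13,
 * LBA in the 4-byte information field with the Valid bit set) otherwise.
 */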
6120
6121 /**
6122  * ipr_get_autosense - Copy autosense data to sense buffer
6123  * @ipr_cmd:    ipr command struct
6124  *
6125  * This function copies the autosense buffer to the buffer
6126  * in the scsi_cmd, if there is autosense available.
6127  *
6128  * Return value:
6129  *      1 if autosense was available / 0 if not
6130  **/
6131 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6132 {
6133         struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6134         struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6135
6136         if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6137                 return 0;
6138
6139         if (ipr_cmd->ioa_cfg->sis64)
6140                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6141                        min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6142                            SCSI_SENSE_BUFFERSIZE));
6143         else
6144                 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6145                        min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6146                            SCSI_SENSE_BUFFERSIZE));
6147         return 1;
6148 }
6149
6150 /**
6151  * ipr_erp_start - Process an error response for a SCSI op
6152  * @ioa_cfg:    ioa config struct
6153  * @ipr_cmd:    ipr command struct
6154  *
6155  * This function determines whether or not to initiate ERP
6156  * on the affected device.
6157  *
6158  * Return value:
6159  *      nothing
6160  **/
6161 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6162                               struct ipr_cmnd *ipr_cmd)
6163 {
6164         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6165         struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6166         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6167         u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6168
6169         if (!res) {
6170                 ipr_scsi_eh_done(ipr_cmd);
6171                 return;
6172         }
6173
6174         if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6175                 ipr_gen_sense(ipr_cmd);
6176
6177         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6178
6179         switch (masked_ioasc) {
6180         case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6181                 if (ipr_is_naca_model(res))
6182                         scsi_cmd->result |= (DID_ABORT << 16);
6183                 else
6184                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6185                 break;
6186         case IPR_IOASC_IR_RESOURCE_HANDLE:
6187         case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6188                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6189                 break;
6190         case IPR_IOASC_HW_SEL_TIMEOUT:
6191                 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6192                 if (!ipr_is_naca_model(res))
6193                         res->needs_sync_complete = 1;
6194                 break;
6195         case IPR_IOASC_SYNC_REQUIRED:
6196                 if (!res->in_erp)
6197                         res->needs_sync_complete = 1;
6198                 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6199                 break;
6200         case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6201         case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6202                 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6203                 break;
6204         case IPR_IOASC_BUS_WAS_RESET:
6205         case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6206                 /*
6207                  * Report the bus reset and ask for a retry. The device
6208                  * will give CC/UA the next command.
6209                  */
6210                 if (!res->resetting_device)
6211                         scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6212                 scsi_cmd->result |= (DID_ERROR << 16);
6213                 if (!ipr_is_naca_model(res))
6214                         res->needs_sync_complete = 1;
6215                 break;
6216         case IPR_IOASC_HW_DEV_BUS_STATUS:
6217                 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6218                 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6219                         if (!ipr_get_autosense(ipr_cmd)) {
6220                                 if (!ipr_is_naca_model(res)) {
6221                                         ipr_erp_cancel_all(ipr_cmd);
6222                                         return;
6223                                 }
6224                         }
6225                 }
6226                 if (!ipr_is_naca_model(res))
6227                         res->needs_sync_complete = 1;
6228                 break;
6229         case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6230                 break;
6231         case IPR_IOASC_IR_NON_OPTIMIZED:
6232                 if (res->raw_mode) {
6233                         res->raw_mode = 0;
6234                         scsi_cmd->result |= (DID_IMM_RETRY << 16);
6235                 } else
6236                         scsi_cmd->result |= (DID_ERROR << 16);
6237                 break;
6238         default:
6239                 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6240                         scsi_cmd->result |= (DID_ERROR << 16);
6241                 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6242                         res->needs_sync_complete = 1;
6243                 break;
6244         }
6245
6246         scsi_dma_unmap(ipr_cmd->scsi_cmd);
6247         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6248         scsi_cmd->scsi_done(scsi_cmd);
6249 }
6250
6251 /**
6252  * ipr_scsi_done - mid-layer done function
6253  * @ipr_cmd:    ipr command struct
6254  *
6255  * This function is invoked by the interrupt handler for
6256  * ops generated by the SCSI mid-layer
6257  *
6258  * Return value:
6259  *      none
6260  **/
6261 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6262 {
6263         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264         struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266         unsigned long hrrq_flags;
6267
6268         scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269
6270         if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271                 scsi_dma_unmap(scsi_cmd);
6272
6273                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6274                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275                 scsi_cmd->scsi_done(scsi_cmd);
6276                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6277         } else {
6278                 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
6279                 ipr_erp_start(ioa_cfg, ipr_cmd);
6280                 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
6281         }
6282 }
6283
6284 /**
6285  * ipr_queuecommand - Queue a mid-layer request
6286  * @shost:              scsi host struct
6287  * @scsi_cmd:   scsi command struct
6288  *
6289  * This function queues a request generated by the mid-layer.
6290  *
6291  * Return value:
6292  *      0 on success
6293  *      SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6294  *      SCSI_MLQUEUE_HOST_BUSY if host is busy
6295  **/
6296 static int ipr_queuecommand(struct Scsi_Host *shost,
6297                             struct scsi_cmnd *scsi_cmd)
6298 {
6299         struct ipr_ioa_cfg *ioa_cfg;
6300         struct ipr_resource_entry *res;
6301         struct ipr_ioarcb *ioarcb;
6302         struct ipr_cmnd *ipr_cmd;
6303         unsigned long hrrq_flags, lock_flags;
6304         int rc;
6305         struct ipr_hrr_queue *hrrq;
6306         int hrrq_id;
6307
6308         ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6309
6310         scsi_cmd->result = (DID_OK << 16);
6311         res = scsi_cmd->device->hostdata;
6312
6313         if (ipr_is_gata(res) && res->sata_port) {
6314                 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6315                 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6316                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6317                 return rc;
6318         }
6319
6320         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6321         hrrq = &ioa_cfg->hrrq[hrrq_id];
6322
6323         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6324         /*
6325          * We are currently blocking all devices due to a host reset.
6326          * We have told the host to stop giving us new requests, but
6327          * ERP ops don't count. FIXME
6328          */
6329         if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6330                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6331                 return SCSI_MLQUEUE_HOST_BUSY;
6332         }
6333
6334         /*
6335          * FIXME - Create scsi_set_host_offline interface
6336          *  and the ioa_is_dead check can be removed
6337          */
6338         if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6339                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6340                 goto err_nodev;
6341         }
6342
6343         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6344         if (ipr_cmd == NULL) {
6345                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6346                 return SCSI_MLQUEUE_HOST_BUSY;
6347         }
6348         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6349
6350         ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6351         ioarcb = &ipr_cmd->ioarcb;
6352
6353         memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6354         ipr_cmd->scsi_cmd = scsi_cmd;
6355         ipr_cmd->done = ipr_scsi_eh_done;
6356
6357         if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6358                 if (scsi_cmd->underflow == 0)
6359                         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6360
6361                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6362                 if (ipr_is_gscsi(res) && res->reset_occurred) {
6363                         res->reset_occurred = 0;
6364                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6365                 }
6366                 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6367                 if (scsi_cmd->flags & SCMD_TAGGED)
6368                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6369                 else
6370                         ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6371         }
6372
6373         if (scsi_cmd->cmnd[0] >= 0xC0 &&
6374             (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6375                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6376         }
6377         if (res->raw_mode && ipr_is_af_dasd_device(res))
6378                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6379
6380         if (ioa_cfg->sis64)
6381                 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6382         else
6383                 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6384
6385         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6386         if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6387                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6388                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6389                 if (!rc)
6390                         scsi_dma_unmap(scsi_cmd);
6391                 return SCSI_MLQUEUE_HOST_BUSY;
6392         }
6393
6394         if (unlikely(hrrq->ioa_is_dead)) {
6395                 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6396                 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6397                 scsi_dma_unmap(scsi_cmd);
6398                 goto err_nodev;
6399         }
6400
6401         ioarcb->res_handle = res->res_handle;
6402         if (res->needs_sync_complete) {
6403                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6404                 res->needs_sync_complete = 0;
6405         }
6406         list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6407         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6408         ipr_send_command(ipr_cmd);
6409         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6410         return 0;
6411
6412 err_nodev:
6413         spin_lock_irqsave(hrrq->lock, hrrq_flags);
6414         memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6415         scsi_cmd->result = (DID_NO_CONNECT << 16);
6416         scsi_cmd->scsi_done(scsi_cmd);
6417         spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6418         return 0;
6419 }
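
/*
 * Illustrative sketch (annotation, not part of ipr.c): the mid-layer
 * contract ipr_queuecommand() above follows -- return 0 once the op is
 * accepted (completion arrives later through scsi_done), or a
 * SCSI_MLQUEUE_* value to make the mid-layer requeue the dispatch.
 */
#if 0	/* example only -- never compiled */
static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	bool busy = false;		/* e.g. no free command block */

	if (busy)
		return SCSI_MLQUEUE_HOST_BUSY;	/* mid-layer retries */

	/* accept the op; cmd->scsi_done(cmd) runs at completion time,
	 * normally from the interrupt path (see ipr_scsi_done) */
	return 0;
}
#endif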
6420
6421 /**
6422  * ipr_ioctl - IOCTL handler
6423  * @sdev:       scsi device struct
6424  * @cmd:        IOCTL cmd
6425  * @arg:        IOCTL arg
6426  *
6427  * Return value:
6428  *      0 on success / other on failure
6429  **/
6430 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
6431 {
6432         struct ipr_resource_entry *res;
6433
6434         res = (struct ipr_resource_entry *)sdev->hostdata;
6435         if (res && ipr_is_gata(res)) {
6436                 if (cmd == HDIO_GET_IDENTITY)
6437                         return -ENOTTY;
6438                 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
6439         }
6440
6441         return -EINVAL;
6442 }
6443
6444 /**
6445  * ipr_ioa_info - Get information about the card/driver
6446  * @host:       scsi host struct
6447  *
6448  * Return value:
6449  *      pointer to buffer with description string
6450  **/
6451 static const char *ipr_ioa_info(struct Scsi_Host *host)
6452 {
6453         static char buffer[512];
6454         struct ipr_ioa_cfg *ioa_cfg;
6455         unsigned long lock_flags = 0;
6456
6457         ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6458
6459         spin_lock_irqsave(host->host_lock, lock_flags);
6460         sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6461         spin_unlock_irqrestore(host->host_lock, lock_flags);
6462
6463         return buffer;
6464 }
6465
6466 static struct scsi_host_template driver_template = {
6467         .module = THIS_MODULE,
6468         .name = "IPR",
6469         .info = ipr_ioa_info,
6470         .ioctl = ipr_ioctl,
6471         .queuecommand = ipr_queuecommand,
6472         .eh_abort_handler = ipr_eh_abort,
6473         .eh_device_reset_handler = ipr_eh_dev_reset,
6474         .eh_host_reset_handler = ipr_eh_host_reset,
6475         .slave_alloc = ipr_slave_alloc,
6476         .slave_configure = ipr_slave_configure,
6477         .slave_destroy = ipr_slave_destroy,
6478         .scan_finished = ipr_scan_finished,
6479         .target_alloc = ipr_target_alloc,
6480         .target_destroy = ipr_target_destroy,
6481         .change_queue_depth = ipr_change_queue_depth,
6482         .bios_param = ipr_biosparam,
6483         .can_queue = IPR_MAX_COMMANDS,
6484         .this_id = -1,
6485         .sg_tablesize = IPR_MAX_SGLIST,
6486         .max_sectors = IPR_IOA_MAX_SECTORS,
6487         .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6488         .use_clustering = ENABLE_CLUSTERING,
6489         .shost_attrs = ipr_ioa_attrs,
6490         .sdev_attrs = ipr_dev_attrs,
6491         .proc_name = IPR_NAME,
6492         .use_blk_tags = 1,
6493 };
6494
6495 /**
6496  * ipr_ata_phy_reset - libata phy_reset handler
6497  * @ap:         ata port to reset
6498  *
6499  **/
6500 static void ipr_ata_phy_reset(struct ata_port *ap)
6501 {
6502         unsigned long flags;
6503         struct ipr_sata_port *sata_port = ap->private_data;
6504         struct ipr_resource_entry *res = sata_port->res;
6505         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6506         int rc;
6507
6508         ENTER;
6509         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6510         while (ioa_cfg->in_reset_reload) {
6511                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6512                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6513                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6514         }
6515
6516         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
6517                 goto out_unlock;
6518
6519         rc = ipr_device_reset(ioa_cfg, res);
6520
6521         if (rc) {
6522                 ap->link.device[0].class = ATA_DEV_NONE;
6523                 goto out_unlock;
6524         }
6525
6526         ap->link.device[0].class = res->ata_class;
6527         if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
6528                 ap->link.device[0].class = ATA_DEV_NONE;
6529
6530 out_unlock:
6531         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6532         LEAVE;
6533 }
6534
6535 /**
6536  * ipr_ata_post_internal - Cleanup after an internal command
6537  * @qc: ATA queued command
6538  *
6539  * Return value:
6540  *      none
6541  **/
6542 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6543 {
6544         struct ipr_sata_port *sata_port = qc->ap->private_data;
6545         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6546         struct ipr_cmnd *ipr_cmd;
6547         struct ipr_hrr_queue *hrrq;
6548         unsigned long flags;
6549
6550         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6551         while (ioa_cfg->in_reset_reload) {
6552                 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6553                 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6554                 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6555         }
6556
6557         for_each_hrrq(hrrq, ioa_cfg) {
6558                 spin_lock(&hrrq->_lock);
6559                 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6560                         if (ipr_cmd->qc == qc) {
6561                                 ipr_device_reset(ioa_cfg, sata_port->res);
6562                                 break;
6563                         }
6564                 }
6565                 spin_unlock(&hrrq->_lock);
6566         }
6567         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6568 }
6569
6570 /**
6571  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6572  * @regs:       destination
6573  * @tf: source ATA taskfile
6574  *
6575  * Return value:
6576  *      none
6577  **/
6578 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6579                              struct ata_taskfile *tf)
6580 {
6581         regs->feature = tf->feature;
6582         regs->nsect = tf->nsect;
6583         regs->lbal = tf->lbal;
6584         regs->lbam = tf->lbam;
6585         regs->lbah = tf->lbah;
6586         regs->device = tf->device;
6587         regs->command = tf->command;
6588         regs->hob_feature = tf->hob_feature;
6589         regs->hob_nsect = tf->hob_nsect;
6590         regs->hob_lbal = tf->hob_lbal;
6591         regs->hob_lbam = tf->hob_lbam;
6592         regs->hob_lbah = tf->hob_lbah;
6593         regs->ctl = tf->ctl;
6594 }
6595
6596 /**
6597  * ipr_sata_done - done function for SATA commands
6598  * @ipr_cmd:    ipr command struct
6599  *
6600  * This function is invoked by the interrupt handler for
6601  * ops generated by the SCSI mid-layer to SATA devices
6602  *
6603  * Return value:
6604  *      none
6605  **/
6606 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6607 {
6608         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6609         struct ata_queued_cmd *qc = ipr_cmd->qc;
6610         struct ipr_sata_port *sata_port = qc->ap->private_data;
6611         struct ipr_resource_entry *res = sata_port->res;
6612         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6613
6614         spin_lock(&ipr_cmd->hrrq->_lock);
6615         if (ipr_cmd->ioa_cfg->sis64)
6616                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6617                        sizeof(struct ipr_ioasa_gata));
6618         else
6619                 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6620                        sizeof(struct ipr_ioasa_gata));
6621         ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6622
6623         if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6624                 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6625
6626         if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6627                 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6628         else
6629                 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6630         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6631         spin_unlock(&ipr_cmd->hrrq->_lock);
6632         ata_qc_complete(qc);
6633 }
6634
6635 /**
6636  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6637  * @ipr_cmd:    ipr command struct
6638  * @qc:         ATA queued command
6639  *
6640  **/
6641 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6642                                   struct ata_queued_cmd *qc)
6643 {
6644         u32 ioadl_flags = 0;
6645         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6646         struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
6647         struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6648         int len = qc->nbytes;
6649         struct scatterlist *sg;
6650         unsigned int si;
6651         dma_addr_t dma_addr = ipr_cmd->dma_addr;
6652
6653         if (len == 0)
6654                 return;
6655
6656         if (qc->dma_dir == DMA_TO_DEVICE) {
6657                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6658                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6659         } else if (qc->dma_dir == DMA_FROM_DEVICE)
6660                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6661
6662         ioarcb->data_transfer_length = cpu_to_be32(len);
6663         ioarcb->ioadl_len =
6664                 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6665         ioarcb->u.sis64_addr_data.data_ioadl_addr =
6666                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
6667
6668         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6669                 ioadl64->flags = cpu_to_be32(ioadl_flags);
6670                 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6671                 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6672
6673                 last_ioadl64 = ioadl64;
6674                 ioadl64++;
6675         }
6676
6677         if (likely(last_ioadl64))
6678                 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6679 }
6680
6681 /**
6682  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6683  * @ipr_cmd:    ipr command struct
6684  * @qc:         ATA queued command
6685  *
6686  **/
6687 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6688                                 struct ata_queued_cmd *qc)
6689 {
6690         u32 ioadl_flags = 0;
6691         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6692         struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6693         struct ipr_ioadl_desc *last_ioadl = NULL;
6694         int len = qc->nbytes;
6695         struct scatterlist *sg;
6696         unsigned int si;
6697
6698         if (len == 0)
6699                 return;
6700
6701         if (qc->dma_dir == DMA_TO_DEVICE) {
6702                 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6703                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6704                 ioarcb->data_transfer_length = cpu_to_be32(len);
6705                 ioarcb->ioadl_len =
6706                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6707         } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6708                 ioadl_flags = IPR_IOADL_FLAGS_READ;
6709                 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6710                 ioarcb->read_ioadl_len =
6711                         cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6712         }
6713
6714         for_each_sg(qc->sg, sg, qc->n_elem, si) {
6715                 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6716                 ioadl->address = cpu_to_be32(sg_dma_address(sg));
6717
6718                 last_ioadl = ioadl;
6719                 ioadl++;
6720         }
6721
6722         if (likely(last_ioadl))
6723                 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6724 }
6725
6726 /**
6727  * ipr_qc_defer - Get a free ipr_cmd
6728  * @qc: queued command
6729  *
6730  * Return value:
6731  *      0 if success
6732  **/
6733 static int ipr_qc_defer(struct ata_queued_cmd *qc)
6734 {
6735         struct ata_port *ap = qc->ap;
6736         struct ipr_sata_port *sata_port = ap->private_data;
6737         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6738         struct ipr_cmnd *ipr_cmd;
6739         struct ipr_hrr_queue *hrrq;
6740         int hrrq_id;
6741
6742         hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6743         hrrq = &ioa_cfg->hrrq[hrrq_id];
6744
6745         qc->lldd_task = NULL;
6746         spin_lock(&hrrq->_lock);
6747         if (unlikely(hrrq->ioa_is_dead)) {
6748                 spin_unlock(&hrrq->_lock);
6749                 return 0;
6750         }
6751
6752         if (unlikely(!hrrq->allow_cmds)) {
6753                 spin_unlock(&hrrq->_lock);
6754                 return ATA_DEFER_LINK;
6755         }
6756
6757         ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6758         if (ipr_cmd == NULL) {
6759                 spin_unlock(&hrrq->_lock);
6760                 return ATA_DEFER_LINK;
6761         }
6762
6763         qc->lldd_task = ipr_cmd;
6764         spin_unlock(&hrrq->_lock);
6765         return 0;
6766 }
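
/*
 * Annotation (not part of ipr.c): returning ATA_DEFER_LINK above asks
 * libata to hold the qc and retry it on this link later, while 0 lets
 * ->qc_issue() proceed; the command block reserved here reaches
 * ipr_qc_issue() through qc->lldd_task.
 */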
6767
6768 /**
6769  * ipr_qc_issue - Issue a SATA qc to a device
6770  * @qc: queued command
6771  *
6772  * Return value:
6773  *      0 if success
6774  **/
6775 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6776 {
6777         struct ata_port *ap = qc->ap;
6778         struct ipr_sata_port *sata_port = ap->private_data;
6779         struct ipr_resource_entry *res = sata_port->res;
6780         struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6781         struct ipr_cmnd *ipr_cmd;
6782         struct ipr_ioarcb *ioarcb;
6783         struct ipr_ioarcb_ata_regs *regs;
6784
6785         if (qc->lldd_task == NULL)
6786                 ipr_qc_defer(qc);
6787
6788         ipr_cmd = qc->lldd_task;
6789         if (ipr_cmd == NULL)
6790                 return AC_ERR_SYSTEM;
6791
6792         qc->lldd_task = NULL;
6793         spin_lock(&ipr_cmd->hrrq->_lock);
6794         if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
6795                         ipr_cmd->hrrq->ioa_is_dead)) {
6796                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6797                 spin_unlock(&ipr_cmd->hrrq->_lock);
6798                 return AC_ERR_SYSTEM;
6799         }
6800
6801         ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
6802         ioarcb = &ipr_cmd->ioarcb;
6803
6804         if (ioa_cfg->sis64) {
6805                 regs = &ipr_cmd->i.ata_ioadl.regs;
6806                 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6807         } else
6808                 regs = &ioarcb->u.add_data.u.regs;
6809
6810         memset(regs, 0, sizeof(*regs));
6811         ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6812
6813         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
6814         ipr_cmd->qc = qc;
6815         ipr_cmd->done = ipr_sata_done;
6816         ipr_cmd->ioarcb.res_handle = res->res_handle;
6817         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6818         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6819         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6820         ipr_cmd->dma_use_sg = qc->n_elem;
6821
6822         if (ioa_cfg->sis64)
6823                 ipr_build_ata_ioadl64(ipr_cmd, qc);
6824         else
6825                 ipr_build_ata_ioadl(ipr_cmd, qc);
6826
6827         regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6828         ipr_copy_sata_tf(regs, &qc->tf);
6829         memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6830         ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6831
6832         switch (qc->tf.protocol) {
6833         case ATA_PROT_NODATA:
6834         case ATA_PROT_PIO:
6835                 break;
6836
6837         case ATA_PROT_DMA:
6838                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6839                 break;
6840
6841         case ATAPI_PROT_PIO:
6842         case ATAPI_PROT_NODATA:
6843                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6844                 break;
6845
6846         case ATAPI_PROT_DMA:
6847                 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6848                 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6849                 break;
6850
6851         default:
6852                 WARN_ON(1);
6853                 spin_unlock(&ipr_cmd->hrrq->_lock);
6854                 return AC_ERR_INVALID;
6855         }
6856
6857         ipr_send_command(ipr_cmd);
6858         spin_unlock(&ipr_cmd->hrrq->_lock);
6859
6860         return 0;
6861 }
6862
6863 /**
6864  * ipr_qc_fill_rtf - Read result TF
6865  * @qc: ATA queued command
6866  *
6867  * Return value:
6868  *      true
6869  **/
6870 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6871 {
6872         struct ipr_sata_port *sata_port = qc->ap->private_data;
6873         struct ipr_ioasa_gata *g = &sata_port->ioasa;
6874         struct ata_taskfile *tf = &qc->result_tf;
6875
6876         tf->feature = g->error;
6877         tf->nsect = g->nsect;
6878         tf->lbal = g->lbal;
6879         tf->lbam = g->lbam;
6880         tf->lbah = g->lbah;
6881         tf->device = g->device;
6882         tf->command = g->status;
6883         tf->hob_nsect = g->hob_nsect;
6884         tf->hob_lbal = g->hob_lbal;
6885         tf->hob_lbam = g->hob_lbam;
6886         tf->hob_lbah = g->hob_lbah;
6887
6888         return true;
6889 }
6890
6891 static struct ata_port_operations ipr_sata_ops = {
6892         .phy_reset = ipr_ata_phy_reset,
6893         .hardreset = ipr_sata_reset,
6894         .post_internal_cmd = ipr_ata_post_internal,
6895         .qc_prep = ata_noop_qc_prep,
6896         .qc_defer = ipr_qc_defer,
6897         .qc_issue = ipr_qc_issue,
6898         .qc_fill_rtf = ipr_qc_fill_rtf,
6899         .port_start = ata_sas_port_start,
6900         .port_stop = ata_sas_port_stop
6901 };
6902
6903 static struct ata_port_info sata_port_info = {
6904         .flags          = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
6905                           ATA_FLAG_SAS_HOST,
6906         .pio_mask       = ATA_PIO4_ONLY,
6907         .mwdma_mask     = ATA_MWDMA2,
6908         .udma_mask      = ATA_UDMA6,
6909         .port_ops       = &ipr_sata_ops
6910 };
6911
6912 #ifdef CONFIG_PPC_PSERIES
6913 static const u16 ipr_blocked_processors[] = {
6914         PVR_NORTHSTAR,
6915         PVR_PULSAR,
6916         PVR_POWER4,
6917         PVR_ICESTAR,
6918         PVR_SSTAR,
6919         PVR_POWER4p,
6920         PVR_630,
6921         PVR_630p
6922 };
6923
6924 /**
6925  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6926  * @ioa_cfg:    ioa cfg struct
6927  *
6928  * Adapters that use Gemstone revision < 3.1 do not work reliably on
6929  * certain pSeries hardware. This function determines if the given
6930  * adapter is in one of these configurations or not.
6931  *
6932  * Return value:
6933  *      1 if adapter is not supported / 0 if adapter is supported
6934  **/
6935 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6936 {
6937         int i;
6938
6939         if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6940                 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6941                         if (pvr_version_is(ipr_blocked_processors[i]))
6942                                 return 1;
6943                 }
6944         }
6945         return 0;
6946 }
6947 #else
6948 #define ipr_invalid_adapter(ioa_cfg) 0
6949 #endif
6950
6951 /**
6952  * ipr_ioa_bringdown_done - IOA bring down completion.
6953  * @ipr_cmd:    ipr command struct
6954  *
6955  * This function processes the completion of an adapter bring down.
6956  * It wakes any reset sleepers.
6957  *
6958  * Return value:
6959  *      IPR_RC_JOB_RETURN
6960  **/
6961 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6962 {
6963         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6964         int i;
6965
6966         ENTER;
6967         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6968                 ipr_trace;
6969                 spin_unlock_irq(ioa_cfg->host->host_lock);
6970                 scsi_unblock_requests(ioa_cfg->host);
6971                 spin_lock_irq(ioa_cfg->host->host_lock);
6972         }
6973
6974         ioa_cfg->in_reset_reload = 0;
6975         ioa_cfg->reset_retries = 0;
6976         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6977                 spin_lock(&ioa_cfg->hrrq[i]._lock);
6978                 ioa_cfg->hrrq[i].ioa_is_dead = 1;
6979                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
6980         }
6981         wmb();
6982
6983         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6984         wake_up_all(&ioa_cfg->reset_wait_q);
6985         LEAVE;
6986
6987         return IPR_RC_JOB_RETURN;
6988 }
6989
6990 /**
6991  * ipr_ioa_reset_done - IOA reset completion.
6992  * @ipr_cmd:    ipr command struct
6993  *
6994  * This function processes the completion of an adapter reset.
6995  * It schedules any necessary mid-layer add/removes and
6996  * wakes any reset sleepers.
6997  *
6998  * Return value:
6999  *      IPR_RC_JOB_RETURN
7000  **/
7001 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7002 {
7003         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7004         struct ipr_resource_entry *res;
7005         struct ipr_hostrcb *hostrcb, *temp;
7006         int i = 0, j;
7007
7008         ENTER;
7009         ioa_cfg->in_reset_reload = 0;
7010         for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7011                 spin_lock(&ioa_cfg->hrrq[j]._lock);
7012                 ioa_cfg->hrrq[j].allow_cmds = 1;
7013                 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7014         }
7015         wmb();
7016         ioa_cfg->reset_cmd = NULL;
7017         ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
7018
7019         list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
7020                 if (res->add_to_ml || res->del_from_ml) {
7021                         ipr_trace;
7022                         break;
7023                 }
7024         }
7025         schedule_work(&ioa_cfg->work_q);
7026
7027         list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
7028                 list_del(&hostrcb->queue);
7029                 if (i++ < IPR_NUM_LOG_HCAMS)
7030                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
7031                 else
7032                         ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
7033         }
7034
7035         scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
7036         dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7037
7038         ioa_cfg->reset_retries = 0;
7039         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7040         wake_up_all(&ioa_cfg->reset_wait_q);
7041
7042         spin_unlock(ioa_cfg->host->host_lock);
7043         scsi_unblock_requests(ioa_cfg->host);
7044         spin_lock(ioa_cfg->host->host_lock);
7045
7046         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7047                 scsi_block_requests(ioa_cfg->host);
7048
7049         schedule_work(&ioa_cfg->work_q);
7050         LEAVE;
7051         return IPR_RC_JOB_RETURN;
7052 }
7053
7054 /**
7055  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7056  * @supported_dev:      supported device struct
7057  * @vpids:                      vendor product id struct
7058  *
7059  * Return value:
7060  *      none
7061  **/
7062 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7063                                  struct ipr_std_inq_vpids *vpids)
7064 {
7065         memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7066         memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7067         supported_dev->num_records = 1;
7068         supported_dev->data_length =
7069                 cpu_to_be16(sizeof(struct ipr_supported_device));
7070         supported_dev->reserved = 0;
7071 }
7072
7073 /**
7074  * ipr_set_supported_devs - Send Set Supported Devices for a device
7075  * @ipr_cmd:    ipr command struct
7076  *
7077  * This function sends a Set Supported Devices to the adapter
7078  *
7079  * Return value:
7080  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7081  **/
7082 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7083 {
7084         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7085         struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
7086         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7087         struct ipr_resource_entry *res = ipr_cmd->u.res;
7088
7089         ipr_cmd->job_step = ipr_ioa_reset_done;
7090
7091         list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
7092                 if (!ipr_is_scsi_disk(res))
7093                         continue;
7094
7095                 ipr_cmd->u.res = res;
7096                 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
7097
7098                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7099                 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7100                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7101
7102                 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
7103                 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
7104                 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7105                 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7106
7107                 ipr_init_ioadl(ipr_cmd,
7108                                ioa_cfg->vpd_cbs_dma +
7109                                  offsetof(struct ipr_misc_cbs, supp_dev),
7110                                sizeof(struct ipr_supported_device),
7111                                IPR_IOADL_FLAGS_WRITE_LAST);
7112
7113                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7114                            IPR_SET_SUP_DEVICE_TIMEOUT);
7115
7116                 if (!ioa_cfg->sis64)
7117                         ipr_cmd->job_step = ipr_set_supported_devs;
7118                 LEAVE;
7119                 return IPR_RC_JOB_RETURN;
7120         }
7121
7122         LEAVE;
7123         return IPR_RC_JOB_CONTINUE;
7124 }
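
/*
 * ipr_set_supported_devs() above shows the driver's job-step idiom for
 * iterating a list across asynchronous commands: the step saves its
 * position in ipr_cmd->u.res, issues one request, points job_step back
 * at itself, and returns IPR_RC_JOB_RETURN; when the request completes,
 * the reset engine re-enters the same step and
 * list_for_each_entry_continue() resumes after the saved cursor. A
 * minimal sketch of the pattern, with hypothetical names:
 *
 *	static int example_step(struct example_cmd *cmd)
 *	{
 *		struct example_dev *dev = cmd->cursor;
 *
 *		cmd->next_step = example_done_step;
 *		list_for_each_entry_continue(dev, &example_dev_list, node) {
 *			cmd->cursor = dev;		// save position
 *			issue_async_request(cmd, dev);
 *			cmd->next_step = example_step;	// resume here
 *			return EXAMPLE_JOB_RETURN;	// wait for completion
 *		}
 *		return EXAMPLE_JOB_CONTINUE;		// list exhausted
 *	}
 */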
7125
7126 /**
7127  * ipr_get_mode_page - Locate specified mode page
7128  * @mode_pages: mode page buffer
7129  * @page_code:  page code to find
7130  * @len:                minimum required length for mode page
7131  *
7132  * Return value:
7133  *      pointer to mode page / NULL on failure
7134  **/
7135 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7136                                u32 page_code, u32 len)
7137 {
7138         struct ipr_mode_page_hdr *mode_hdr;
7139         u32 page_length;
7140         u32 length;
7141
7142         if (!mode_pages || (mode_pages->hdr.length == 0))
7143                 return NULL;
7144
7145         length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7146         mode_hdr = (struct ipr_mode_page_hdr *)
7147                 (mode_pages->data + mode_pages->hdr.block_desc_len);
7148
7149         while (length) {
7150                 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7151                         if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7152                                 return mode_hdr;
7153                         break;
7154                 } else {
7155                         page_length = (sizeof(struct ipr_mode_page_hdr) +
7156                                        mode_hdr->page_length);
7157                         length -= page_length;
7158                         mode_hdr = (struct ipr_mode_page_hdr *)
7159                                 ((unsigned long)mode_hdr + page_length);
7160                 }
7161         }
7162         return NULL;
7163 }
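
/*
 * The walk above follows the standard MODE SENSE(6) layout: a 4-byte
 * mode parameter header (whose length byte excludes itself), optional
 * block descriptors, then a chain of pages, each a 2-byte header
 * (page code, page length) followed by that many bytes of data. A
 * self-contained sketch of the same walk over a raw buffer, assuming
 * well-formed data (hypothetical helper, not part of the driver):
 *
 *	static u8 *find_mode_page(u8 *buf, u8 code)
 *	{
 *		u8 *end = buf + buf[0] + 1;	// mode data length + itself
 *		u8 *p = buf + 4 + buf[3];	// skip header + block descs
 *
 *		while (p + 2 <= end) {
 *			if ((p[0] & 0x3f) == code)	// low 6 bits = code
 *				return p;
 *			p += 2 + p[1];		// header + PAGE LENGTH
 *		}
 *		return NULL;
 *	}
 */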
7164
7165 /**
7166  * ipr_check_term_power - Check for term power errors
7167  * @ioa_cfg:    ioa config struct
7168  * @mode_pages: IOAFP mode pages buffer
7169  *
7170  * Check the IOAFP's mode page 28 for term power errors
7171  *
7172  * Return value:
7173  *      nothing
7174  **/
7175 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7176                                  struct ipr_mode_pages *mode_pages)
7177 {
7178         int i;
7179         int entry_length;
7180         struct ipr_dev_bus_entry *bus;
7181         struct ipr_mode_page28 *mode_page;
7182
7183         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7184                                       sizeof(struct ipr_mode_page28));
7185
7186         entry_length = mode_page->entry_length;
7187
7188         bus = mode_page->bus;
7189
7190         for (i = 0; i < mode_page->num_entries; i++) {
7191                 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7192                         dev_err(&ioa_cfg->pdev->dev,
7193                                 "Term power is absent on scsi bus %d\n",
7194                                 bus->res_addr.bus);
7195                 }
7196
7197                 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7198         }
7199 }
7200
7201 /**
7202  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7203  * @ioa_cfg:    ioa config struct
7204  *
7205  * Looks through the config table checking for SES devices. If
7206  * the SES device is in the SES table indicating a maximum SCSI
7207  * bus speed, the speed is limited for the bus.
7208  *
7209  * Return value:
7210  *      none
7211  **/
7212 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7213 {
7214         u32 max_xfer_rate;
7215         int i;
7216
7217         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7218                 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7219                                                        ioa_cfg->bus_attr[i].bus_width);
7220
7221                 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7222                         ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7223         }
7224 }
7225
7226 /**
7227  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7228  * @ioa_cfg:    ioa config struct
7229  * @mode_pages: mode page 28 buffer
7230  *
7231  * Updates mode page 28 based on driver configuration
7232  *
7233  * Return value:
7234  *      none
7235  **/
7236 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
7237                                           struct ipr_mode_pages *mode_pages)
7238 {
7239         int i, entry_length;
7240         struct ipr_dev_bus_entry *bus;
7241         struct ipr_bus_attributes *bus_attr;
7242         struct ipr_mode_page28 *mode_page;
7243
7244         mode_page = ipr_get_mode_page(mode_pages, 0x28,
7245                                       sizeof(struct ipr_mode_page28));
7246
7247         entry_length = mode_page->entry_length;
7248
7249         /* Loop for each device bus entry */
7250         for (i = 0, bus = mode_page->bus;
7251              i < mode_page->num_entries;
7252              i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7253                 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7254                         dev_err(&ioa_cfg->pdev->dev,
7255                                 "Invalid resource address reported: 0x%08X\n",
7256                                 IPR_GET_PHYS_LOC(bus->res_addr));
7257                         continue;
7258                 }
7259
7260                 bus_attr = &ioa_cfg->bus_attr[i];
7261                 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7262                 bus->bus_width = bus_attr->bus_width;
7263                 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7264                 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7265                 if (bus_attr->qas_enabled)
7266                         bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7267                 else
7268                         bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7269         }
7270 }
7271
7272 /**
7273  * ipr_build_mode_select - Build a mode select command
7274  * @ipr_cmd:    ipr command struct
7275  * @res_handle: resource handle to send command to
7276  * @parm:               Byte 1 of the Mode Select CDB
7277  * @dma_addr:   DMA buffer address
7278  * @xfer_len:   data transfer length
7279  *
7280  * Return value:
7281  *      none
7282  **/
7283 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
7284                                   __be32 res_handle, u8 parm,
7285                                   dma_addr_t dma_addr, u8 xfer_len)
7286 {
7287         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7288
7289         ioarcb->res_handle = res_handle;
7290         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7291         ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7292         ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7293         ioarcb->cmd_pkt.cdb[1] = parm;
7294         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7295
7296         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
7297 }
7298
7299 /**
7300  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7301  * @ipr_cmd:    ipr command struct
7302  *
7303  * This function sets up the SCSI bus attributes and sends
7304  * a Mode Select for Page 28 to activate them.
7305  *
7306  * Return value:
7307  *      IPR_RC_JOB_RETURN
7308  **/
7309 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7310 {
7311         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7312         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7313         int length;
7314
7315         ENTER;
7316         ipr_scsi_bus_speed_limit(ioa_cfg);
7317         ipr_check_term_power(ioa_cfg, mode_pages);
7318         ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7319         length = mode_pages->hdr.length + 1;      /* MODE DATA LENGTH excludes itself */
7320         mode_pages->hdr.length = 0;     /* reserved (must be zero) for MODE SELECT */
7321
7322         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7323                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7324                               length);
7325
7326         ipr_cmd->job_step = ipr_set_supported_devs;
7327         ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7328                                     struct ipr_resource_entry, queue);
7329         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7330
7331         LEAVE;
7332         return IPR_RC_JOB_RETURN;
7333 }
7334
7335 /**
7336  * ipr_build_mode_sense - Builds a mode sense command
7337  * @ipr_cmd:    ipr command struct
7338  * @res_handle: resource handle to send command to
7339  * @parm:               Byte 2 of mode sense command
7340  * @dma_addr:   DMA address of mode sense buffer
7341  * @xfer_len:   Size of DMA buffer
7342  *
7343  * Return value:
7344  *      none
7345  **/
7346 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7347                                  __be32 res_handle,
7348                                  u8 parm, dma_addr_t dma_addr, u8 xfer_len)
7349 {
7350         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7351
7352         ioarcb->res_handle = res_handle;
7353         ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7354         ioarcb->cmd_pkt.cdb[2] = parm;
7355         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7356         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7357
7358         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7359 }
7360
7361 /**
7362  * ipr_reset_cmd_failed - Handle failure of IOA reset command
7363  * @ipr_cmd:    ipr command struct
7364  *
7365  * This function handles the failure of an IOA bringup command.
7366  *
7367  * Return value:
7368  *      IPR_RC_JOB_RETURN
7369  **/
7370 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7371 {
7372         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7373         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7374
7375         dev_err(&ioa_cfg->pdev->dev,
7376                 "0x%02X failed with IOASC: 0x%08X\n",
7377                 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7378
7379         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7380         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7381         return IPR_RC_JOB_RETURN;
7382 }
7383
7384 /**
7385  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7386  * @ipr_cmd:    ipr command struct
7387  *
7388  * This function handles the failure of a Mode Sense to the IOAFP.
7389  * Some adapters do not handle all mode pages.
7390  *
7391  * Return value:
7392  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7393  **/
7394 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7395 {
7396         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7397         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7398
7399         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7400                 ipr_cmd->job_step = ipr_set_supported_devs;
7401                 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7402                                             struct ipr_resource_entry, queue);
7403                 return IPR_RC_JOB_CONTINUE;
7404         }
7405
7406         return ipr_reset_cmd_failed(ipr_cmd);
7407 }
7408
7409 /**
7410  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7411  * @ipr_cmd:    ipr command struct
7412  *
7413  * This function sends a Page 28 mode sense to the IOA to
7414  * retrieve SCSI bus attributes.
7415  *
7416  * Return value:
7417  *      IPR_RC_JOB_RETURN
7418  **/
7419 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7420 {
7421         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7422
7423         ENTER;
7424         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7425                              0x28, ioa_cfg->vpd_cbs_dma +
7426                              offsetof(struct ipr_misc_cbs, mode_pages),
7427                              sizeof(struct ipr_mode_pages));
7428
7429         ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
7430         ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
7431
7432         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7433
7434         LEAVE;
7435         return IPR_RC_JOB_RETURN;
7436 }
7437
7438 /**
7439  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7440  * @ipr_cmd:    ipr command struct
7441  *
7442  * This function enables dual IOA RAID support if possible.
7443  *
7444  * Return value:
7445  *      IPR_RC_JOB_RETURN
7446  **/
7447 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7448 {
7449         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7450         struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7451         struct ipr_mode_page24 *mode_page;
7452         int length;
7453
7454         ENTER;
7455         mode_page = ipr_get_mode_page(mode_pages, 0x24,
7456                                       sizeof(struct ipr_mode_page24));
7457
7458         if (mode_page)
7459                 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7460
7461         length = mode_pages->hdr.length + 1;      /* MODE DATA LENGTH excludes itself */
7462         mode_pages->hdr.length = 0;     /* reserved (must be zero) for MODE SELECT */
7463
7464         ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7465                               ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7466                               length);
7467
7468         ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7469         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7470
7471         LEAVE;
7472         return IPR_RC_JOB_RETURN;
7473 }
7474
7475 /**
7476  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7477  * @ipr_cmd:    ipr command struct
7478  *
7479  * This function handles the failure of a Mode Sense to the IOAFP.
7480  * Some adapters do not handle all mode pages.
7481  *
7482  * Return value:
7483  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7484  **/
7485 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7486 {
7487         u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7488
7489         if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7490                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7491                 return IPR_RC_JOB_CONTINUE;
7492         }
7493
7494         return ipr_reset_cmd_failed(ipr_cmd);
7495 }
7496
7497 /**
7498  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7499  * @ipr_cmd:    ipr command struct
7500  *
7501  * This function sends a mode sense to the IOA to retrieve
7502  * the IOA Advanced Function Control mode page.
7503  *
7504  * Return value:
7505  *      IPR_RC_JOB_RETURN
7506  **/
7507 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7508 {
7509         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7510
7511         ENTER;
7512         ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7513                              0x24, ioa_cfg->vpd_cbs_dma +
7514                              offsetof(struct ipr_misc_cbs, mode_pages),
7515                              sizeof(struct ipr_mode_pages));
7516
7517         ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7518         ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7519
7520         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7521
7522         LEAVE;
7523         return IPR_RC_JOB_RETURN;
7524 }
7525
7526 /**
7527  * ipr_init_res_table - Initialize the resource table
7528  * @ipr_cmd:    ipr command struct
7529  *
7530  * This function looks through the existing resource table, comparing
7531  * it with the config table. This function will take care of old/new
7532  * devices and schedule adding/removing them from the mid-layer
7533  * as appropriate.
7534  *
7535  * Return value:
7536  *      IPR_RC_JOB_CONTINUE
7537  **/
7538 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7539 {
7540         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7541         struct ipr_resource_entry *res, *temp;
7542         struct ipr_config_table_entry_wrapper cfgtew;
7543         int entries, found, flag, i;
7544         LIST_HEAD(old_res);
7545
7546         ENTER;
7547         if (ioa_cfg->sis64)
7548                 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7549         else
7550                 flag = ioa_cfg->u.cfg_table->hdr.flags;
7551
7552         if (flag & IPR_UCODE_DOWNLOAD_REQ)
7553                 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7554
7555         list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7556                 list_move_tail(&res->queue, &old_res);
7557
7558         if (ioa_cfg->sis64)
7559                 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7560         else
7561                 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7562
7563         for (i = 0; i < entries; i++) {
7564                 if (ioa_cfg->sis64)
7565                         cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7566                 else
7567                         cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7568                 found = 0;
7569
7570                 list_for_each_entry_safe(res, temp, &old_res, queue) {
7571                         if (ipr_is_same_device(res, &cfgtew)) {
7572                                 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7573                                 found = 1;
7574                                 break;
7575                         }
7576                 }
7577
7578                 if (!found) {
7579                         if (list_empty(&ioa_cfg->free_res_q)) {
7580                                 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7581                                 break;
7582                         }
7583
7584                         found = 1;
7585                         res = list_entry(ioa_cfg->free_res_q.next,
7586                                          struct ipr_resource_entry, queue);
7587                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7588                         ipr_init_res_entry(res, &cfgtew);
7589                         res->add_to_ml = 1;
7590                 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7591                         res->sdev->allow_restart = 1;
7592
7593                 if (found)
7594                         ipr_update_res_entry(res, &cfgtew);
7595         }
7596
7597         list_for_each_entry_safe(res, temp, &old_res, queue) {
7598                 if (res->sdev) {
7599                         res->del_from_ml = 1;
7600                         res->res_handle = IPR_INVALID_RES_HANDLE;
7601                         list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7602                 }
7603         }
7604
7605         list_for_each_entry_safe(res, temp, &old_res, queue) {
7606                 ipr_clear_res_target(res);
7607                 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7608         }
7609
7610         if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7611                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7612         else
7613                 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7614
7615         LEAVE;
7616         return IPR_RC_JOB_CONTINUE;
7617 }
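
/*
 * The reconciliation above is the classic "splice out and match back"
 * diff over two lists: everything previously known is presumed gone,
 * each entry reported by the adapter reclaims its match (or consumes a
 * free entry and is flagged add_to_ml), and whatever remains unclaimed
 * has vanished and is flagged del_from_ml. The shape of the algorithm,
 * with hypothetical helper names:
 *
 *	LIST_HEAD(old_res);
 *
 *	list_splice_init(&used_q, &old_res);	// presume all gone
 *	for (i = 0; i < reported_entries; i++) {
 *		res = find_match(&old_res, &table[i]);
 *		if (res)			// still present
 *			list_move_tail(&res->queue, &used_q);
 *		else				// newly reported
 *			res = claim_free_entry(&used_q, &table[i]);
 *		update_entry(res, &table[i]);
 *	}
 *	mark_leftovers_for_removal(&old_res);	// devices that vanished
 */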
7618
7619 /**
7620  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7621  * @ipr_cmd:    ipr command struct
7622  *
7623  * This function sends a Query IOA Configuration command
7624  * to the adapter to retrieve the IOA configuration table.
7625  *
7626  * Return value:
7627  *      IPR_RC_JOB_RETURN
7628  **/
7629 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7630 {
7631         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7632         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7633         struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7634         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7635
7636         ENTER;
7637         if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7638                 ioa_cfg->dual_raid = 1;
7639         dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7640                  ucode_vpd->major_release, ucode_vpd->card_type,
7641                  ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7642         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7643         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7644
7645         ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7646         ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7647         ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7648         ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7649
7650         ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7651                        IPR_IOADL_FLAGS_READ_LAST);
7652
7653         ipr_cmd->job_step = ipr_init_res_table;
7654
7655         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7656
7657         LEAVE;
7658         return IPR_RC_JOB_RETURN;
7659 }
7660
7661 /**
7662  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7663  * @ipr_cmd:    ipr command struct
7664  *
7665  * This utility function sends an inquiry to the adapter.
7666  *
7667  * Return value:
7668  *      none
7669  **/
7670 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7671                               dma_addr_t dma_addr, u8 xfer_len)
7672 {
7673         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7674
7675         ENTER;
7676         ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7677         ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7678
7679         ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7680         ioarcb->cmd_pkt.cdb[1] = flags;
7681         ioarcb->cmd_pkt.cdb[2] = page;
7682         ioarcb->cmd_pkt.cdb[4] = xfer_len;
7683
7684         ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7685
7686         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7687         LEAVE;
7688 }
7689
7690 /**
7691  * ipr_inquiry_page_supported - Is the given inquiry page supported
7692  * @page0:              inquiry page 0 buffer
7693  * @page:               page code.
7694  *
7695  * This function determines if the specified inquiry page is supported.
7696  *
7697  * Return value:
7698  *      1 if page is supported / 0 if not
7699  **/
7700 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7701 {
7702         int i;
7703
7704         for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7705                 if (page0->page[i] == page)
7706                         return 1;
7707
7708         return 0;
7709 }
7710
7711 /**
7712  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7713  * @ipr_cmd:    ipr command struct
7714  *
7715  * This function sends a Page 0xD0 inquiry to the adapter
7716  * to retrieve adapter capabilities.
7717  *
7718  * Return value:
7719  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7720  **/
7721 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7722 {
7723         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7724         struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7725         struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7726
7727         ENTER;
7728         ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7729         memset(cap, 0, sizeof(*cap));
7730
7731         if (ipr_inquiry_page_supported(page0, 0xD0)) {
7732                 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7733                                   ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7734                                   sizeof(struct ipr_inquiry_cap));
7735                 return IPR_RC_JOB_RETURN;
7736         }
7737
7738         LEAVE;
7739         return IPR_RC_JOB_CONTINUE;
7740 }
7741
7742 /**
7743  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7744  * @ipr_cmd:    ipr command struct
7745  *
7746  * This function sends a Page 3 inquiry to the adapter
7747  * to retrieve software VPD information.
7748  *
7749  * Return value:
7750  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7751  **/
7752 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7753 {
7754         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7755
7756         ENTER;
7757
7758         ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7759
7760         ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7761                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7762                           sizeof(struct ipr_inquiry_page3));
7763
7764         LEAVE;
7765         return IPR_RC_JOB_RETURN;
7766 }
7767
7768 /**
7769  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7770  * @ipr_cmd:    ipr command struct
7771  *
7772  * This function sends a Page 0 inquiry to the adapter
7773  * to retrieve supported inquiry pages.
7774  *
7775  * Return value:
7776  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7777  **/
7778 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7779 {
7780         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7781         char type[5];
7782
7783         ENTER;
7784
7785         /* Grab the type out of the VPD and store it away */
7786         memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7787         type[4] = '\0';
7788         ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7789
7790         if (ipr_invalid_adapter(ioa_cfg)) {
7791                 dev_err(&ioa_cfg->pdev->dev,
7792                         "Adapter not supported in this hardware configuration.\n");
7793
7794                 if (!ipr_testmode) {
7795                         ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
7796                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7797                         list_add_tail(&ipr_cmd->queue,
7798                                         &ioa_cfg->hrrq->hrrq_free_q);
7799                         return IPR_RC_JOB_RETURN;
7800                 }
7801         }
7802
7803         ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7804
7805         ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7806                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7807                           sizeof(struct ipr_inquiry_page0));
7808
7809         LEAVE;
7810         return IPR_RC_JOB_RETURN;
7811 }
7812
7813 /**
7814  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7815  * @ipr_cmd:    ipr command struct
7816  *
7817  * This function sends a standard inquiry to the adapter.
7818  *
7819  * Return value:
7820  *      IPR_RC_JOB_RETURN
7821  **/
7822 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7823 {
7824         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7825
7826         ENTER;
7827         ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7828
7829         ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7830                           ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7831                           sizeof(struct ipr_ioa_vpd));
7832
7833         LEAVE;
7834         return IPR_RC_JOB_RETURN;
7835 }
7836
7837 /**
7838  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7839  * @ipr_cmd:    ipr command struct
7840  *
7841  * This function sends an Identify Host Request Response Queue
7842  * command to establish the HRRQ with the adapter.
7843  *
7844  * Return value:
7845  *      IPR_RC_JOB_RETURN
7846  **/
7847 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7848 {
7849         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7850         struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7851         struct ipr_hrr_queue *hrrq;
7852
7853         ENTER;
7854         ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7855         dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7856
7857         if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7858                 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7859
7860                 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7861                 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7862
7863                 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7864                 if (ioa_cfg->sis64)
7865                         ioarcb->cmd_pkt.cdb[1] = 0x1;
7866
7867                 if (ioa_cfg->nvectors == 1)
7868                         ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7869                 else
7870                         ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7871
7872                 ioarcb->cmd_pkt.cdb[2] =
7873                         ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7874                 ioarcb->cmd_pkt.cdb[3] =
7875                         ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7876                 ioarcb->cmd_pkt.cdb[4] =
7877                         ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7878                 ioarcb->cmd_pkt.cdb[5] =
7879                         ((u64) hrrq->host_rrq_dma) & 0xff;
7880                 ioarcb->cmd_pkt.cdb[7] =
7881                         ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7882                 ioarcb->cmd_pkt.cdb[8] =
7883                         (sizeof(u32) * hrrq->size) & 0xff;
7884
7885                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7886                         ioarcb->cmd_pkt.cdb[9] =
7887                                         ioa_cfg->identify_hrrq_index;
7888
7889                 if (ioa_cfg->sis64) {
7890                         ioarcb->cmd_pkt.cdb[10] =
7891                                 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7892                         ioarcb->cmd_pkt.cdb[11] =
7893                                 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7894                         ioarcb->cmd_pkt.cdb[12] =
7895                                 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7896                         ioarcb->cmd_pkt.cdb[13] =
7897                                 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7898                 }
7899
7900                 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7901                         ioarcb->cmd_pkt.cdb[14] =
7902                                         ioa_cfg->identify_hrrq_index;
7903
7904                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7905                            IPR_INTERNAL_TIMEOUT);
7906
7907                 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7908                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7909
7910                 LEAVE;
7911                 return IPR_RC_JOB_RETURN;
7912         }
7913
7914         LEAVE;
7915         return IPR_RC_JOB_CONTINUE;
7916 }
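
/*
 * The CDB built above carries the HRRQ DMA address in big-endian byte
 * order, split across two fields: bytes 2-5 hold address bits 31:0 and,
 * on SIS-64 adapters, bytes 10-13 hold bits 63:32. A compact sketch of
 * the same most-significant-byte-first packing (hypothetical helper,
 * not part of the driver):
 *
 *	static void put_be32(u8 *p, u32 v)
 *	{
 *		p[0] = (v >> 24) & 0xff;	// MSB first
 *		p[1] = (v >> 16) & 0xff;
 *		p[2] = (v >> 8) & 0xff;
 *		p[3] = v & 0xff;
 *	}
 *
 *	put_be32(&cdb[2], lower_32_bits(dma));		// bits 31:0
 *	if (sis64)
 *		put_be32(&cdb[10], upper_32_bits(dma));	// bits 63:32
 */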
7917
7918 /**
7919  * ipr_reset_timer_done - Adapter reset timer function
7920  * @ipr_cmd:    ipr command struct
7921  *
7922  * Description: This function is used in adapter reset processing
7923  * for timing events. If the reset_cmd pointer in the IOA
7924  * config struct no longer points at this command, we are doing
7925  * nested resets and fail_all_ops will take care of freeing the
7926  * command block.
7927  *
7928  * Return value:
7929  *      none
7930  **/
7931 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7932 {
7933         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7934         unsigned long lock_flags = 0;
7935
7936         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7937
7938         if (ioa_cfg->reset_cmd == ipr_cmd) {
7939                 list_del(&ipr_cmd->queue);
7940                 ipr_cmd->done(ipr_cmd);
7941         }
7942
7943         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7944 }
7945
7946 /**
7947  * ipr_reset_start_timer - Start a timer for adapter reset job
7948  * @ipr_cmd:    ipr command struct
7949  * @timeout:    timeout value
7950  *
7951  * Description: This function is used in adapter reset processing
7952  * for timing events. If the reset_cmd pointer in the IOA
7953  * config struct no longer points at this command, we are doing
7954  * nested resets and fail_all_ops will take care of freeing the
7955  * command block.
7956  *
7957  * Return value:
7958  *      none
7959  **/
7960 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7961                                   unsigned long timeout)
7962 {
7963
7964         ENTER;
7965         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7966         ipr_cmd->done = ipr_reset_ioa_job;
7967
7968         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7969         ipr_cmd->timer.expires = jiffies + timeout;
7970         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7971         add_timer(&ipr_cmd->timer);
7972 }
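
/*
 * This uses the older kernel timer API (before timer_setup() existed),
 * where the callback takes an opaque unsigned long and the context
 * pointer is smuggled through timer.data. The arming pattern used
 * throughout this driver, shown in isolation with hypothetical names:
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)data;
 *		// handle the expiry
 *	}
 *
 *	ctx->timer.data = (unsigned long)ctx;
 *	ctx->timer.expires = jiffies + timeout;	// timeout in jiffies
 *	ctx->timer.function = my_timeout;
 *	add_timer(&ctx->timer);
 */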
7973
7974 /**
7975  * ipr_init_ioa_mem - Initialize ioa_cfg control block
7976  * @ioa_cfg:    ioa cfg struct
7977  *
7978  * Return value:
7979  *      nothing
7980  **/
7981 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7982 {
7983         struct ipr_hrr_queue *hrrq;
7984
7985         for_each_hrrq(hrrq, ioa_cfg) {
7986                 spin_lock(&hrrq->_lock);
7987                 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7988
7989                 /* Initialize Host RRQ pointers */
7990                 hrrq->hrrq_start = hrrq->host_rrq;
7991                 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7992                 hrrq->hrrq_curr = hrrq->hrrq_start;
7993                 hrrq->toggle_bit = 1;
7994                 spin_unlock(&hrrq->_lock);
7995         }
7996         wmb();
7997
7998         ioa_cfg->identify_hrrq_index = 0;
7999         if (ioa_cfg->hrrq_num == 1)
8000                 atomic_set(&ioa_cfg->hrrq_index, 0);
8001         else
8002                 atomic_set(&ioa_cfg->hrrq_index, 1);
8003
8004         /* Zero out config table */
8005         memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
8006 }
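
/*
 * host_rrq is a circular buffer the adapter fills and the host drains;
 * the toggle bit tells the consumer where valid data ends without any
 * shared index. On each pass the producer writes entries whose toggle
 * bit matches the current expected value, and the consumer (elsewhere
 * in this driver) drains with a loop of roughly this shape:
 *
 *	while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 *	       hrrq->toggle_bit) {
 *		process_entry(hrrq->hrrq_curr);
 *		if (hrrq->hrrq_curr < hrrq->hrrq_end) {
 *			hrrq->hrrq_curr++;
 *		} else {			// wrapped: flip expectation
 *			hrrq->hrrq_curr = hrrq->hrrq_start;
 *			hrrq->toggle_bit ^= 1u;
 *		}
 *	}
 *
 * Initializing toggle_bit to 1 here means the first pass of adapter
 * writes, which set the bit, is seen as valid.
 */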
8007
8008 /**
8009  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8010  * @ipr_cmd:    ipr command struct
8011  *
8012  * Return value:
8013  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8014  **/
8015 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8016 {
8017         unsigned long stage, stage_time;
8018         u32 feedback;
8019         volatile u32 int_reg;
8020         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8021         u64 maskval = 0;
8022
8023         feedback = readl(ioa_cfg->regs.init_feedback_reg);
8024         stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8025         stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8026
8027         ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8028
8029         /* sanity check the stage_time value */
8030         if (stage_time == 0)
8031                 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8032         else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
8033                 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8034         else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8035                 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8036
8037         if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8038                 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8039                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8040                 stage_time = ioa_cfg->transop_timeout;
8041                 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8042         } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
8043                 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8044                 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8045                         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8046                         maskval = IPR_PCII_IPL_STAGE_CHANGE;
8047                         maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8048                         writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8049                         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8050                         return IPR_RC_JOB_CONTINUE;
8051                 }
8052         }
8053
8054         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8055         ipr_cmd->timer.expires = jiffies + stage_time * HZ;
8056         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8057         ipr_cmd->done = ipr_reset_ioa_job;
8058         add_timer(&ipr_cmd->timer);
8059
8060         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8061
8062         return IPR_RC_JOB_RETURN;
8063 }
8064
8065 /**
8066  * ipr_reset_enable_ioa - Enable the IOA following a reset.
8067  * @ipr_cmd:    ipr command struct
8068  *
8069  * This function reinitializes some control blocks and
8070  * enables destructive diagnostics on the adapter.
8071  *
8072  * Return value:
8073  *      IPR_RC_JOB_RETURN
8074  **/
8075 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8076 {
8077         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8078         volatile u32 int_reg;
8079         volatile u64 maskval;
8080         int i;
8081
8082         ENTER;
8083         ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8084         ipr_init_ioa_mem(ioa_cfg);
8085
8086         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8087                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8088                 ioa_cfg->hrrq[i].allow_interrupts = 1;
8089                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8090         }
8091         wmb();
8092         if (ioa_cfg->sis64) {
8093                 /* Set the adapter to the correct endian mode. */
8094                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8095                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8096         }
8097
8098         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8099
8100         if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8101                 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
8102                        ioa_cfg->regs.clr_interrupt_mask_reg32);
8103                 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8104                 return IPR_RC_JOB_CONTINUE;
8105         }
8106
8107         /* Enable destructive diagnostics on IOA */
8108         writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
8109
8110         if (ioa_cfg->sis64) {
8111                 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8112                 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8113                 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8114         } else
8115                 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
8116
8117         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8118
8119         dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8120
8121         if (ioa_cfg->sis64) {
8122                 ipr_cmd->job_step = ipr_reset_next_stage;
8123                 return IPR_RC_JOB_CONTINUE;
8124         }
8125
8126         ipr_cmd->timer.data = (unsigned long) ipr_cmd;
8127         ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
8128         ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
8129         ipr_cmd->done = ipr_reset_ioa_job;
8130         add_timer(&ipr_cmd->timer);
8131         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8132
8133         LEAVE;
8134         return IPR_RC_JOB_RETURN;
8135 }
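
/*
 * On SIS-64 hardware the interrupt mask register is 64 bits wide, so
 * the code above composes the value from two 32-bit halves and issues a
 * single writeq(); the general shape is:
 *
 *	u64 val = ((u64)high_bits << 32) | low_bits;
 *
 *	writeq(val, reg);	// one 64-bit MMIO store
 *
 * The widening to u64 before the shift (here, assigning to a u64
 * variable first) is what keeps the shift from being performed in
 * 32-bit arithmetic and losing the high word.
 */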
8136
8137 /**
8138  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8139  * @ipr_cmd:    ipr command struct
8140  *
8141  * This function is invoked when an adapter dump has run out
8142  * of processing time.
8143  *
8144  * Return value:
8145  *      IPR_RC_JOB_CONTINUE
8146  **/
8147 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8148 {
8149         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8150
8151         if (ioa_cfg->sdt_state == GET_DUMP)
8152                 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8153         else if (ioa_cfg->sdt_state == READ_DUMP)
8154                 ioa_cfg->sdt_state = ABORT_DUMP;
8155
8156         ioa_cfg->dump_timeout = 1;
8157         ipr_cmd->job_step = ipr_reset_alert;
8158
8159         return IPR_RC_JOB_CONTINUE;
8160 }
8161
8162 /**
8163  * ipr_unit_check_no_data - Log a unit check/no data error log
8164  * @ioa_cfg:            ioa config struct
8165  *
8166  * Logs an error indicating the adapter unit checked, but for some
8167  * reason, we were unable to fetch the unit check buffer.
8168  *
8169  * Return value:
8170  *      nothing
8171  **/
8172 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8173 {
8174         ioa_cfg->errors_logged++;
8175         dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8176 }
8177
8178 /**
8179  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8180  * @ioa_cfg:            ioa config struct
8181  *
8182  * Fetches the unit check buffer from the adapter by clocking the data
8183  * through the mailbox register.
8184  *
8185  * Return value:
8186  *      nothing
8187  **/
8188 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8189 {
8190         unsigned long mailbox;
8191         struct ipr_hostrcb *hostrcb;
8192         struct ipr_uc_sdt sdt;
8193         int rc, length;
8194         u32 ioasc;
8195
8196         mailbox = readl(ioa_cfg->ioa_mailbox);
8197
8198         if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
8199                 ipr_unit_check_no_data(ioa_cfg);
8200                 return;
8201         }
8202
8203         memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8204         rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8205                                         (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8206
8207         if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8208             ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8209             (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
8210                 ipr_unit_check_no_data(ioa_cfg);
8211                 return;
8212         }
8213
8214         /* Find length of the first sdt entry (UC buffer) */
8215         if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8216                 length = be32_to_cpu(sdt.entry[0].end_token);
8217         else
8218                 length = (be32_to_cpu(sdt.entry[0].end_token) -
8219                           be32_to_cpu(sdt.entry[0].start_token)) &
8220                           IPR_FMT2_MBX_ADDR_MASK;
8221
8222         hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8223                              struct ipr_hostrcb, queue);
8224         list_del(&hostrcb->queue);
8225         memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8226
8227         rc = ipr_get_ldump_data_section(ioa_cfg,
8228                                         be32_to_cpu(sdt.entry[0].start_token),
8229                                         (__be32 *)&hostrcb->hcam,
8230                                         min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8231
8232         if (!rc) {
8233                 ipr_handle_log_data(ioa_cfg, hostrcb);
8234                 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
8235                 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8236                     ioa_cfg->sdt_state == GET_DUMP)
8237                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8238         } else
8239                 ipr_unit_check_no_data(ioa_cfg);
8240
8241         list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8242 }
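
/*
 * On the length math above: a FMT3 SDT entry reports the unit check
 * buffer length directly in end_token, while a FMT2 entry reports start
 * and end mailbox addresses, so the length is their difference masked
 * to the address field. For example (illustrative values only):
 *
 *	start_token = 0x0027f400, end_token = 0x0027f7ff
 *	length = (0x0027f7ff - 0x0027f400) & IPR_FMT2_MBX_ADDR_MASK
 *	       = 0x3ff bytes
 *
 * The copy is then clamped with min(length, sizeof(hostrcb->hcam)) so a
 * buggy or hostile length can never overrun the host buffer.
 */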
8243
8244 /**
8245  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8246  * @ipr_cmd:    ipr command struct
8247  *
8248  * Description: This function will call to get the unit check buffer.
8249  *
8250  * Return value:
8251  *      IPR_RC_JOB_RETURN
8252  **/
8253 static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8254 {
8255         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8256
8257         ENTER;
8258         ioa_cfg->ioa_unit_checked = 0;
8259         ipr_get_unit_check_buffer(ioa_cfg);
8260         ipr_cmd->job_step = ipr_reset_alert;
8261         ipr_reset_start_timer(ipr_cmd, 0);
8262
8263         LEAVE;
8264         return IPR_RC_JOB_RETURN;
8265 }
8266
8267 /**
8268  * ipr_reset_restore_cfg_space - Restore PCI config space.
8269  * @ipr_cmd:    ipr command struct
8270  *
8271  * Description: This function restores the saved PCI config space of
8272  * the adapter, fails all outstanding ops back to the callers, and
8273  * fetches the dump/unit check if applicable to this reset.
8274  *
8275  * Return value:
8276  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8277  **/
8278 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8279 {
8280         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8281         u32 int_reg;
8282
8283         ENTER;
8284         ioa_cfg->pdev->state_saved = true;
8285         pci_restore_state(ioa_cfg->pdev);
8286
8287         if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
8288                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8289                 return IPR_RC_JOB_CONTINUE;
8290         }
8291
8292         ipr_fail_all_ops(ioa_cfg);
8293
8294         if (ioa_cfg->sis64) {
8295                 /* Set the adapter to the correct endian mode. */
8296                 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8297                 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8298         }
8299
8300         if (ioa_cfg->ioa_unit_checked) {
8301                 if (ioa_cfg->sis64) {
8302                         ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8303                         ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8304                         return IPR_RC_JOB_RETURN;
8305                 } else {
8306                         ioa_cfg->ioa_unit_checked = 0;
8307                         ipr_get_unit_check_buffer(ioa_cfg);
8308                         ipr_cmd->job_step = ipr_reset_alert;
8309                         ipr_reset_start_timer(ipr_cmd, 0);
8310                         return IPR_RC_JOB_RETURN;
8311                 }
8312         }
8313
8314         if (ioa_cfg->in_ioa_bringdown) {
8315                 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8316         } else {
8317                 ipr_cmd->job_step = ipr_reset_enable_ioa;
8318
8319                 if (GET_DUMP == ioa_cfg->sdt_state) {
8320                         ioa_cfg->sdt_state = READ_DUMP;
8321                         ioa_cfg->dump_timeout = 0;
8322                         if (ioa_cfg->sis64)
8323                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8324                         else
8325                                 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8326                         ipr_cmd->job_step = ipr_reset_wait_for_dump;
8327                         schedule_work(&ioa_cfg->work_q);
8328                         return IPR_RC_JOB_RETURN;
8329                 }
8330         }
8331
8332         LEAVE;
8333         return IPR_RC_JOB_CONTINUE;
8334 }
8335
8336 /**
8337  * ipr_reset_bist_done - BIST has completed on the adapter.
8338  * @ipr_cmd:    ipr command struct
8339  *
8340  * Description: Unblock config space and resume the reset process.
8341  *
8342  * Return value:
8343  *      IPR_RC_JOB_CONTINUE
8344  **/
8345 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8346 {
8347         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8348
8349         ENTER;
8350         if (ioa_cfg->cfg_locked)
8351                 pci_cfg_access_unlock(ioa_cfg->pdev);
8352         ioa_cfg->cfg_locked = 0;
8353         ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8354         LEAVE;
8355         return IPR_RC_JOB_CONTINUE;
8356 }
8357
8358 /**
8359  * ipr_reset_start_bist - Run BIST on the adapter.
8360  * @ipr_cmd:    ipr command struct
8361  *
8362  * Description: This function runs BIST on the adapter, then delays 2 seconds.
8363  *
8364  * Return value:
8365  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8366  **/
8367 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8368 {
8369         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8370         int rc = PCIBIOS_SUCCESSFUL;
8371
8372         ENTER;
8373         if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8374                 writel(IPR_UPROCI_SIS64_START_BIST,
8375                        ioa_cfg->regs.set_uproc_interrupt_reg32);
8376         else
8377                 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8378
8379         if (rc == PCIBIOS_SUCCESSFUL) {
8380                 ipr_cmd->job_step = ipr_reset_bist_done;
8381                 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8382                 rc = IPR_RC_JOB_RETURN;
8383         } else {
8384                 if (ioa_cfg->cfg_locked)
8385                         pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8386                 ioa_cfg->cfg_locked = 0;
8387                 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8388                 rc = IPR_RC_JOB_CONTINUE;
8389         }
8390
8391         LEAVE;
8392         return rc;
8393 }
8394
8395 /**
8396  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8397  * @ipr_cmd:    ipr command struct
8398  *
8399  * Description: This clears PCI reset to the adapter and delays two seconds.
8400  *
8401  * Return value:
8402  *      IPR_RC_JOB_RETURN
8403  **/
8404 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8405 {
8406         ENTER;
8407         ipr_cmd->job_step = ipr_reset_bist_done;
8408         ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8409         LEAVE;
8410         return IPR_RC_JOB_RETURN;
8411 }
8412
8413 /**
8414  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8415  * @work:       work struct
8416  *
8417  * Description: This pulses warm reset to a slot.
8418  *
8419  **/
8420 static void ipr_reset_reset_work(struct work_struct *work)
8421 {
8422         struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8423         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8424         struct pci_dev *pdev = ioa_cfg->pdev;
8425         unsigned long lock_flags = 0;
8426
8427         ENTER;
8428         pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8429         msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8430         pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8431
8432         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8433         if (ioa_cfg->reset_cmd == ipr_cmd)
8434                 ipr_reset_ioa_job(ipr_cmd);
8435         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8436         LEAVE;
8437 }
8438
8439 /**
8440  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8441  * @ipr_cmd:    ipr command struct
8442  *
8443  * Description: This asserts PCI reset to the adapter.
8444  *
8445  * Return value:
8446  *      IPR_RC_JOB_RETURN
8447  **/
8448 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8449 {
8450         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8451
8452         ENTER;
8453         INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8454         queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8455         ipr_cmd->job_step = ipr_reset_slot_reset_done;
8456         LEAVE;
8457         return IPR_RC_JOB_RETURN;
8458 }
8459
8460 /**
8461  * ipr_reset_block_config_access_wait - Wait for permission to block config access
8462  * @ipr_cmd:    ipr command struct
8463  *
8464  * Description: This attempts to block config access to the IOA.
8465  *
8466  * Return value:
8467  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8468  **/
8469 static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8470 {
8471         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8472         int rc = IPR_RC_JOB_CONTINUE;
8473
8474         if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8475                 ioa_cfg->cfg_locked = 1;
8476                 ipr_cmd->job_step = ioa_cfg->reset;
8477         } else {
8478                 if (ipr_cmd->u.time_left) {
8479                         rc = IPR_RC_JOB_RETURN;
8480                         ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8481                         ipr_reset_start_timer(ipr_cmd,
8482                                               IPR_CHECK_FOR_RESET_TIMEOUT);
8483                 } else {
8484                         ipr_cmd->job_step = ioa_cfg->reset;
8485                         dev_err(&ioa_cfg->pdev->dev,
8486                                 "Timed out waiting to lock config access. Resetting anyway.\n");
8487                 }
8488         }
8489
8490         return rc;
8491 }
8492
8493 /**
8494  * ipr_reset_block_config_access - Block config access to the IOA
8495  * @ipr_cmd:    ipr command struct
8496  *
8497  * Description: This attempts to block config access to the IOA
8498  *
8499  * Return value:
8500  *      IPR_RC_JOB_CONTINUE
8501  **/
8502 static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8503 {
8504         ipr_cmd->ioa_cfg->cfg_locked = 0;
8505         ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8506         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8507         return IPR_RC_JOB_CONTINUE;
8508 }
8509
8510 /**
8511  * ipr_reset_allowed - Query whether or not IOA can be reset
8512  * @ioa_cfg:    ioa config struct
8513  *
8514  * Return value:
8515  *      0 if reset not allowed / non-zero if reset is allowed
8516  **/
8517 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8518 {
8519         volatile u32 temp_reg;
8520
8521         temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8522         return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8523 }
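
/*
 * Note: "reset allowed" simply means the IPR_PCII_CRITICAL_OPERATION
 * bit is clear in the sense interrupt register, i.e. the adapter is
 * not in the middle of an operation (such as a flash write, see the
 * comment on ipr_reset_wait_to_start_bist below) that a reset would
 * corrupt.
 */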
8524
8525 /**
8526  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8527  * @ipr_cmd:    ipr command struct
8528  *
8529  * Description: This function waits for adapter permission to run BIST,
8530  * then runs BIST. If the adapter does not give permission after a
8531  * reasonable time, we will reset the adapter anyway. Resetting the
8532  * adapter without warning it risks losing the adapter's persistent
8533  * error log: if the adapter is reset while it is writing to its
8534  * flash, that flash segment will have bad ECC and be zeroed.
8536  *
8537  * Return value:
8538  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8539  **/
8540 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8541 {
8542         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8543         int rc = IPR_RC_JOB_RETURN;
8544
8545         if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8546                 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8547                 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8548         } else {
8549                 ipr_cmd->job_step = ipr_reset_block_config_access;
8550                 rc = IPR_RC_JOB_CONTINUE;
8551         }
8552
8553         return rc;
8554 }
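
/*
 * Note: this is a poll loop built on the job-step machinery: each pass
 * burns IPR_CHECK_FOR_RESET_TIMEOUT off the time_left budget (seeded
 * with IPR_WAIT_FOR_RESET_TIMEOUT in ipr_reset_alert) and re-arms the
 * timer; once the budget is exhausted we press on with the reset
 * regardless. Assuming the usual definitions (a 2*HZ budget polled
 * every HZ/10), that is roughly twenty polls over two seconds.
 */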
8555
8556 /**
8557  * ipr_reset_alert - Alert the adapter of a pending reset
8558  * @ipr_cmd:    ipr command struct
8559  *
8560  * Description: This function alerts the adapter that it will be reset.
8561  * If memory space is not currently enabled, proceed directly
8562  * to running BIST on the adapter. The timer must always be started
8563  * so we guarantee we do not run BIST from ipr_isr.
8564  *
8565  * Return value:
8566  *      IPR_RC_JOB_RETURN
8567  **/
8568 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8569 {
8570         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8571         u16 cmd_reg;
8572         int rc;
8573
8574         ENTER;
8575         rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8576
8577         if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8578                 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8579                 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8580                 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8581         } else {
8582                 ipr_cmd->job_step = ipr_reset_block_config_access;
8583         }
8584
8585         ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8586         ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8587
8588         LEAVE;
8589         return IPR_RC_JOB_RETURN;
8590 }
8591
8592 /**
8593  * ipr_reset_quiesce_done - Complete IOA disconnect
8594  * @ipr_cmd:    ipr command struct
8595  *
8596  * Description: Freeze the adapter to complete quiesce processing
8597  *
8598  * Return value:
8599  *      IPR_RC_JOB_CONTINUE
8600  **/
8601 static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8602 {
8603         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8604
8605         ENTER;
8606         ipr_cmd->job_step = ipr_ioa_bringdown_done;
8607         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8608         LEAVE;
8609         return IPR_RC_JOB_CONTINUE;
8610 }
8611
8612 /**
8613  * ipr_reset_cancel_hcam_done - Check for outstanding commands
8614  * @ipr_cmd:    ipr command struct
8615  *
8616  * Description: If nothing is outstanding to the IOA, proceed with
8617  * the IOA disconnect. Otherwise reset the IOA.
8618  *
8619  * Return value:
8620  *      IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8621  **/
8622 static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8623 {
8624         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8625         struct ipr_cmnd *loop_cmd;
8626         struct ipr_hrr_queue *hrrq;
8627         int rc = IPR_RC_JOB_CONTINUE;
8628         int count = 0;
8629
8630         ENTER;
8631         ipr_cmd->job_step = ipr_reset_quiesce_done;
8632
8633         for_each_hrrq(hrrq, ioa_cfg) {
8634                 spin_lock(&hrrq->_lock);
8635                 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8636                         count++;
8637                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8638                         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8639                         rc = IPR_RC_JOB_RETURN;
8640                         break;
8641                 }
8642                 spin_unlock(&hrrq->_lock);
8643
8644                 if (count)
8645                         break;
8646         }
8647
8648         LEAVE;
8649         return rc;
8650 }
8651
8652 /**
8653  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8654  * @ipr_cmd:    ipr command struct
8655  *
8656  * Description: Cancel any outstanding HCAMs to the IOA.
8657  *
8658  * Return value:
8659  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8660  **/
8661 static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8662 {
8663         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8664         int rc = IPR_RC_JOB_CONTINUE;
8665         struct ipr_cmd_pkt *cmd_pkt;
8666         struct ipr_cmnd *hcam_cmd;
8667         struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8668
8669         ENTER;
8670         ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8671
8672         if (!hrrq->ioa_is_dead) {
8673                 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8674                         list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8675                                 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8676                                         continue;
8677
8678                                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8680                                 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8681                                 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8682                                 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8683                                 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
8684                                 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8685                                 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8686                                 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8687                                 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8688                                 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8689                                 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8690                                 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8691                                 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8692
8693                                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8694                                            IPR_CANCEL_TIMEOUT);
8695
8696                                 rc = IPR_RC_JOB_RETURN;
8697                                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8698                                 break;
8699                         }
8700                 }
8701         } else
8702                 ipr_cmd->job_step = ipr_reset_alert;
8703
8704         LEAVE;
8705         return rc;
8706 }
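
/*
 * Note: the CANCEL REQUEST CDB built above carries the 64-bit IOARCB
 * address of the HCAM being cancelled big-endian, split across two
 * byte ranges: bits 63:32 in cdb[10..13] and bits 31:0 in cdb[2..5].
 * A (hypothetical) hcam_cmd->dma_addr of 0x0123456789abcdef would
 * therefore pack as:
 *
 *	cdb[10..13] = 01 23 45 67
 *	cdb[2..5]   = 89 ab cd ef
 */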
8707
8708 /**
8709  * ipr_reset_ucode_download_done - Microcode download completion
8710  * @ipr_cmd:    ipr command struct
8711  *
8712  * Description: This function unmaps the microcode download buffer.
8713  *
8714  * Return value:
8715  *      IPR_RC_JOB_CONTINUE
8716  **/
8717 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8718 {
8719         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8720         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8721
8722         dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8723                      sglist->num_sg, DMA_TO_DEVICE);
8724
8725         ipr_cmd->job_step = ipr_reset_alert;
8726         return IPR_RC_JOB_CONTINUE;
8727 }
8728
8729 /**
8730  * ipr_reset_ucode_download - Download microcode to the adapter
8731  * @ipr_cmd:    ipr command struct
8732  *
8733  * Description: This function checks to see if it there is microcode
8734  * to download to the adapter. If there is, a download is performed.
8735  *
8736  * Return value:
8737  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8738  **/
8739 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8740 {
8741         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8742         struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8743
8744         ENTER;
8745         ipr_cmd->job_step = ipr_reset_alert;
8746
8747         if (!sglist)
8748                 return IPR_RC_JOB_CONTINUE;
8749
8750         ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8751         ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8752         ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8753         ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
8754         ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8755         ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8756         ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8757
8758         if (ioa_cfg->sis64)
8759                 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8760         else
8761                 ipr_build_ucode_ioadl(ipr_cmd, sglist);
8762         ipr_cmd->job_step = ipr_reset_ucode_download_done;
8763
8764         ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8765                    IPR_WRITE_BUFFER_TIMEOUT);
8766
8767         LEAVE;
8768         return IPR_RC_JOB_RETURN;
8769 }
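
/*
 * Note: this is an ordinary SCSI WRITE BUFFER command (in the
 * download-and-save mode named by IPR_WR_BUF_DOWNLOAD_AND_SAVE)
 * addressed to the IOA itself, with the 24-bit transfer length packed
 * big-endian into cdb[6..8]: a (hypothetical) buffer_len of 0x012345
 * gives cdb[6] = 0x01, cdb[7] = 0x23, cdb[8] = 0x45.
 */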
8770
8771 /**
8772  * ipr_reset_shutdown_ioa - Shutdown the adapter
8773  * @ipr_cmd:    ipr command struct
8774  *
8775  * Description: This function issues an adapter shutdown of the
8776  * specified type to the specified adapter as part of the
8777  * adapter reset job.
8778  *
8779  * Return value:
8780  *      IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8781  **/
8782 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8783 {
8784         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8785         enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8786         unsigned long timeout;
8787         int rc = IPR_RC_JOB_CONTINUE;
8788
8789         ENTER;
8790         if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8791                 ipr_cmd->job_step = ipr_reset_cancel_hcam;
8792         else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8793                         !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8794                 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8795                 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8796                 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8797                 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8798
8799                 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8800                         timeout = IPR_SHUTDOWN_TIMEOUT;
8801                 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8802                         timeout = IPR_INTERNAL_TIMEOUT;
8803                 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8804                         timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8805                 else
8806                         timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8807
8808                 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8809
8810                 rc = IPR_RC_JOB_RETURN;
8811                 ipr_cmd->job_step = ipr_reset_ucode_download;
8812         } else
8813                 ipr_cmd->job_step = ipr_reset_alert;
8814
8815         LEAVE;
8816         return rc;
8817 }
8818
8819 /**
8820  * ipr_reset_ioa_job - Adapter reset job
8821  * @ipr_cmd:    ipr command struct
8822  *
8823  * Description: This function is the job router for the adapter reset job.
8824  *
8825  * Return value:
8826  *      none
8827  **/
8828 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8829 {
8830         u32 rc, ioasc;
8831         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8832
8833         do {
8834                 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8835
8836                 if (ioa_cfg->reset_cmd != ipr_cmd) {
8837                         /*
8838                          * We are doing nested adapter resets and this is
8839                          * not the current reset job.
8840                          */
8841                         list_add_tail(&ipr_cmd->queue,
8842                                         &ipr_cmd->hrrq->hrrq_free_q);
8843                         return;
8844                 }
8845
8846                 if (IPR_IOASC_SENSE_KEY(ioasc)) {
8847                         rc = ipr_cmd->job_step_failed(ipr_cmd);
8848                         if (rc == IPR_RC_JOB_RETURN)
8849                                 return;
8850                 }
8851
8852                 ipr_reinit_ipr_cmnd(ipr_cmd);
8853                 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8854                 rc = ipr_cmd->job_step(ipr_cmd);
8855         } while (rc == IPR_RC_JOB_CONTINUE);
8856 }
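
/*
 * Note: the adapter reset is a small state machine driven by this
 * router. A job step returning IPR_RC_JOB_CONTINUE means "run the next
 * job_step immediately", so the do/while chews through synchronous
 * steps in a single call; IPR_RC_JOB_RETURN means the step started
 * something asynchronous (a timer, an IOA command, a work item) whose
 * completion will call back into ipr_reset_ioa_job to resume the
 * chain.
 */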
8857
8858 /**
8859  * _ipr_initiate_ioa_reset - Initiate an adapter reset
8860  * @ioa_cfg:            ioa config struct
8861  * @job_step:           first job step of reset job
8862  * @shutdown_type:      shutdown type
8863  *
8864  * Description: This function will initiate the reset of the given adapter
8865  * starting at the selected job step.
8866  * If the caller needs to wait on the completion of the reset,
8867  * the caller must sleep on the reset_wait_q.
8868  *
8869  * Return value:
8870  *      none
8871  **/
8872 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8873                                     int (*job_step) (struct ipr_cmnd *),
8874                                     enum ipr_shutdown_type shutdown_type)
8875 {
8876         struct ipr_cmnd *ipr_cmd;
8877         int i;
8878
8879         ioa_cfg->in_reset_reload = 1;
8880         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8881                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8882                 ioa_cfg->hrrq[i].allow_cmds = 0;
8883                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8884         }
8885         wmb();
8886         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
8887                 scsi_block_requests(ioa_cfg->host);
8888
8889         ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8890         ioa_cfg->reset_cmd = ipr_cmd;
8891         ipr_cmd->job_step = job_step;
8892         ipr_cmd->u.shutdown_type = shutdown_type;
8893
8894         ipr_reset_ioa_job(ipr_cmd);
8895 }
8896
8897 /**
8898  * ipr_initiate_ioa_reset - Initiate an adapter reset
8899  * @ioa_cfg:            ioa config struct
8900  * @shutdown_type:      shutdown type
8901  *
8902  * Description: This function will initiate the reset of the given adapter.
8903  * If the caller needs to wait on the completion of the reset,
8904  * the caller must sleep on the reset_wait_q.
8905  *
8906  * Return value:
8907  *      none
8908  **/
8909 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8910                                    enum ipr_shutdown_type shutdown_type)
8911 {
8912         int i;
8913
8914         if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8915                 return;
8916
8917         if (ioa_cfg->in_reset_reload) {
8918                 if (ioa_cfg->sdt_state == GET_DUMP)
8919                         ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8920                 else if (ioa_cfg->sdt_state == READ_DUMP)
8921                         ioa_cfg->sdt_state = ABORT_DUMP;
8922         }
8923
8924         if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8925                 dev_err(&ioa_cfg->pdev->dev,
8926                         "IOA taken offline - error recovery failed\n");
8927
8928                 ioa_cfg->reset_retries = 0;
8929                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8930                         spin_lock(&ioa_cfg->hrrq[i]._lock);
8931                         ioa_cfg->hrrq[i].ioa_is_dead = 1;
8932                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
8933                 }
8934                 wmb();
8935
8936                 if (ioa_cfg->in_ioa_bringdown) {
8937                         ioa_cfg->reset_cmd = NULL;
8938                         ioa_cfg->in_reset_reload = 0;
8939                         ipr_fail_all_ops(ioa_cfg);
8940                         wake_up_all(&ioa_cfg->reset_wait_q);
8941
8942                         if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8943                                 spin_unlock_irq(ioa_cfg->host->host_lock);
8944                                 scsi_unblock_requests(ioa_cfg->host);
8945                                 spin_lock_irq(ioa_cfg->host->host_lock);
8946                         }
8947                         return;
8948                 } else {
8949                         ioa_cfg->in_ioa_bringdown = 1;
8950                         shutdown_type = IPR_SHUTDOWN_NONE;
8951                 }
8952         }
8953
8954         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8955                                 shutdown_type);
8956 }
8957
8958 /**
8959  * ipr_reset_freeze - Hold off all I/O activity
8960  * @ipr_cmd:    ipr command struct
8961  *
8962  * Description: If the PCI slot is frozen, hold off all I/O
8963  * activity; then, as soon as the slot is available again,
8964  * initiate an adapter reset.
8965  */
8966 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8967 {
8968         struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8969         int i;
8970
8971         /* Disallow new interrupts, avoid loop */
8972         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8973                 spin_lock(&ioa_cfg->hrrq[i]._lock);
8974                 ioa_cfg->hrrq[i].allow_interrupts = 0;
8975                 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8976         }
8977         wmb();
8978         list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8979         ipr_cmd->done = ipr_reset_ioa_job;
8980         return IPR_RC_JOB_RETURN;
8981 }
8982
8983 /**
8984  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8985  * @pdev:       PCI device struct
8986  *
8987  * Description: This routine is called to tell us that the MMIO
8988  * access to the IOA has been restored
8989  */
8990 static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8991 {
8992         unsigned long flags = 0;
8993         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8994
8995         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8996         if (!ioa_cfg->probe_done)
8997                 pci_save_state(pdev);
8998         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8999         return PCI_ERS_RESULT_NEED_RESET;
9000 }
9001
9002 /**
9003  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9004  * @pdev:       PCI device struct
9005  *
9006  * Description: This routine is called to tell us that the PCI bus
9007  * is down. Can't do anything here, except put the device driver
9008  * into a holding pattern, waiting for the PCI bus to come back.
9009  */
9010 static void ipr_pci_frozen(struct pci_dev *pdev)
9011 {
9012         unsigned long flags = 0;
9013         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9014
9015         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9016         if (ioa_cfg->probe_done)
9017                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
9018         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9019 }
9020
9021 /**
9022  * ipr_pci_slot_reset - Called when PCI slot has been reset.
9023  * @pdev:       PCI device struct
9024  *
9025  * Description: This routine is called by the pci error recovery
9026  * code after the PCI slot has been reset, just before we
9027  * should resume normal operations.
9028  */
9029 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9030 {
9031         unsigned long flags = 0;
9032         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9033
9034         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9035         if (ioa_cfg->probe_done) {
9036                 if (ioa_cfg->needs_warm_reset)
9037                         ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9038                 else
9039                         _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9040                                                 IPR_SHUTDOWN_NONE);
9041         } else
9042                 wake_up_all(&ioa_cfg->eeh_wait_q);
9043         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9044         return PCI_ERS_RESULT_RECOVERED;
9045 }
9046
9047 /**
9048  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9049  * @pdev:       PCI device struct
9050  *
9051  * Description: This routine is called when the PCI bus has
9052  * permanently failed.
9053  */
9054 static void ipr_pci_perm_failure(struct pci_dev *pdev)
9055 {
9056         unsigned long flags = 0;
9057         struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9058         int i;
9059
9060         spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9061         if (ioa_cfg->probe_done) {
9062                 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9063                         ioa_cfg->sdt_state = ABORT_DUMP;
9064                 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9065                 ioa_cfg->in_ioa_bringdown = 1;
9066                 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9067                         spin_lock(&ioa_cfg->hrrq[i]._lock);
9068                         ioa_cfg->hrrq[i].allow_cmds = 0;
9069                         spin_unlock(&ioa_cfg->hrrq[i]._lock);
9070                 }
9071                 wmb();
9072                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9073         } else
9074                 wake_up_all(&ioa_cfg->eeh_wait_q);
9075         spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9076 }
9077
9078 /**
9079  * ipr_pci_error_detected - Called when a PCI error is detected.
9080  * @pdev:       PCI device struct
9081  * @state:      PCI channel state
9082  *
9083  * Description: Called when a PCI error is detected.
9084  *
9085  * Return value:
9086  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9087  */
9088 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9089                                                pci_channel_state_t state)
9090 {
9091         switch (state) {
9092         case pci_channel_io_frozen:
9093                 ipr_pci_frozen(pdev);
9094                 return PCI_ERS_RESULT_CAN_RECOVER;
9095         case pci_channel_io_perm_failure:
9096                 ipr_pci_perm_failure(pdev);
9097                 return PCI_ERS_RESULT_DISCONNECT;
9099         default:
9100                 break;
9101         }
9102         return PCI_ERS_RESULT_NEED_RESET;
9103 }
9104
9105 /**
9106  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa()
9107  * @ioa_cfg:    ioa cfg struct
9108  *
9109  * Description: This is the second phase of adapter intialization
9110  * This function takes care of initilizing the adapter to the point
9111  * where it can accept new commands.
9112
9113  * Return value:
9114  *      0 on success / -EIO on failure
9115  **/
9116 static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
9117 {
9118         int rc = 0;
9119         unsigned long host_lock_flags = 0;
9120
9121         ENTER;
9122         spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9123         dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg addr: 0x%p\n", ioa_cfg);
9124         ioa_cfg->probe_done = 1;
9125         if (ioa_cfg->needs_hard_reset) {
9126                 ioa_cfg->needs_hard_reset = 0;
9127                 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9128         } else
9129                 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9130                                         IPR_SHUTDOWN_NONE);
9131         spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9132
9133         LEAVE;
9134         return rc;
9135 }
9136
9137 /**
9138  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9139  * @ioa_cfg:    ioa config struct
9140  *
9141  * Return value:
9142  *      none
9143  **/
9144 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9145 {
9146         int i;
9147
9148         if (ioa_cfg->ipr_cmnd_list) {
9149                 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9150                         if (ioa_cfg->ipr_cmnd_list[i])
9151                                 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9152                                               ioa_cfg->ipr_cmnd_list[i],
9153                                               ioa_cfg->ipr_cmnd_list_dma[i]);
9154
9155                         ioa_cfg->ipr_cmnd_list[i] = NULL;
9156                 }
9157         }
9158
9160         dma_pool_destroy(ioa_cfg->ipr_cmd_pool);   /* NULL-safe */
9161
9162         kfree(ioa_cfg->ipr_cmnd_list);
9163         kfree(ioa_cfg->ipr_cmnd_list_dma);
9164         ioa_cfg->ipr_cmnd_list = NULL;
9165         ioa_cfg->ipr_cmnd_list_dma = NULL;
9166         ioa_cfg->ipr_cmd_pool = NULL;
9167 }
9168
9169 /**
9170  * ipr_free_mem - Frees memory allocated for an adapter
9171  * @ioa_cfg:    ioa cfg struct
9172  *
9173  * Return value:
9174  *      nothing
9175  **/
9176 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9177 {
9178         int i;
9179
9180         kfree(ioa_cfg->res_entries);
9181         dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9182                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9183         ipr_free_cmd_blks(ioa_cfg);
9184
9185         for (i = 0; i < ioa_cfg->hrrq_num; i++)
9186                 dma_free_coherent(&ioa_cfg->pdev->dev,
9187                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9188                                   ioa_cfg->hrrq[i].host_rrq,
9189                                   ioa_cfg->hrrq[i].host_rrq_dma);
9190
9191         dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9192                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9193
9194         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9195                 dma_free_coherent(&ioa_cfg->pdev->dev,
9196                                   sizeof(struct ipr_hostrcb),
9197                                   ioa_cfg->hostrcb[i],
9198                                   ioa_cfg->hostrcb_dma[i]);
9199         }
9200
9201         ipr_free_dump(ioa_cfg);
9202         kfree(ioa_cfg->trace);
9203 }
9204
9205 /**
9206  * ipr_free_irqs - Free all allocated IRQs for the adapter.
9207  * @ioa_cfg:    ipr cfg struct
9208  *
9209  * This function frees all allocated IRQs for the
9210  * specified adapter.
9211  *
9212  * Return value:
9213  *      none
9214  **/
9215 static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9216 {
9217         struct pci_dev *pdev = ioa_cfg->pdev;
9218
9219         if (ioa_cfg->intr_flag == IPR_USE_MSI ||
9220             ioa_cfg->intr_flag == IPR_USE_MSIX) {
9221                 int i;
9222                 for (i = 0; i < ioa_cfg->nvectors; i++)
9223                         free_irq(ioa_cfg->vectors_info[i].vec,
9224                                  &ioa_cfg->hrrq[i]);
9225         } else
9226                 free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
9227
9228         if (ioa_cfg->intr_flag == IPR_USE_MSI) {
9229                 pci_disable_msi(pdev);
9230                 ioa_cfg->intr_flag &= ~IPR_USE_MSI;
9231         } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
9232                 pci_disable_msix(pdev);
9233                 ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
9234         }
9235 }
9236
9237 /**
9238  * ipr_free_all_resources - Free all allocated resources for an adapter.
9239  * @ioa_cfg:    ioa config struct
9240  *
9241  * This function frees all allocated resources for the
9242  * specified adapter.
9243  *
9244  * Return value:
9245  *      none
9246  **/
9247 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9248 {
9249         struct pci_dev *pdev = ioa_cfg->pdev;
9250
9251         ENTER;
9252         ipr_free_irqs(ioa_cfg);
9253         if (ioa_cfg->reset_work_q)
9254                 destroy_workqueue(ioa_cfg->reset_work_q);
9255         iounmap(ioa_cfg->hdw_dma_regs);
9256         pci_release_regions(pdev);
9257         ipr_free_mem(ioa_cfg);
9258         scsi_host_put(ioa_cfg->host);
9259         pci_disable_device(pdev);
9260         LEAVE;
9261 }
9262
9263 /**
9264  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9265  * @ioa_cfg:    ioa config struct
9266  *
9267  * Return value:
9268  *      0 on success / -ENOMEM on allocation failure
9269  **/
9270 static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9271 {
9272         struct ipr_cmnd *ipr_cmd;
9273         struct ipr_ioarcb *ioarcb;
9274         dma_addr_t dma_addr;
9275         int i, entries_each_hrrq, hrrq_id = 0;
9276
9277         ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
9278                                                 sizeof(struct ipr_cmnd), 512, 0);
9279
9280         if (!ioa_cfg->ipr_cmd_pool)
9281                 return -ENOMEM;
9282
9283         ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9284         ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9285
9286         if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9287                 ipr_free_cmd_blks(ioa_cfg);
9288                 return -ENOMEM;
9289         }
9290
9291         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9292                 if (ioa_cfg->hrrq_num > 1) {
9293                         if (i == 0) {
9294                                 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9295                                 ioa_cfg->hrrq[i].min_cmd_id = 0;
9296                                 ioa_cfg->hrrq[i].max_cmd_id =
9297                                         (entries_each_hrrq - 1);
9298                         } else {
9299                                 entries_each_hrrq =
9300                                         IPR_NUM_BASE_CMD_BLKS/
9301                                         (ioa_cfg->hrrq_num - 1);
9302                                 ioa_cfg->hrrq[i].min_cmd_id =
9303                                         IPR_NUM_INTERNAL_CMD_BLKS +
9304                                         (i - 1) * entries_each_hrrq;
9305                                 ioa_cfg->hrrq[i].max_cmd_id =
9306                                         (IPR_NUM_INTERNAL_CMD_BLKS +
9307                                         i * entries_each_hrrq - 1);
9308                         }
9309                 } else {
9310                         entries_each_hrrq = IPR_NUM_CMD_BLKS;
9311                         ioa_cfg->hrrq[i].min_cmd_id = 0;
9312                         ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9313                 }
9314                 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9315         }
9316
9317         BUG_ON(ioa_cfg->hrrq_num == 0);
9318
9319         i = IPR_NUM_CMD_BLKS -
9320                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9321         if (i > 0) {
9322                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9323                 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9324         }
9325
9326         for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9327                 ipr_cmd = dma_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
9328
9329                 if (!ipr_cmd) {
9330                         ipr_free_cmd_blks(ioa_cfg);
9331                         return -ENOMEM;
9332                 }
9333
9334                 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
9335                 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9336                 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9337
9338                 ioarcb = &ipr_cmd->ioarcb;
9339                 ipr_cmd->dma_addr = dma_addr;
9340                 if (ioa_cfg->sis64)
9341                         ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9342                 else
9343                         ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9344
9345                 ioarcb->host_response_handle = cpu_to_be32(i << 2);
9346                 if (ioa_cfg->sis64) {
9347                         ioarcb->u.sis64_addr_data.data_ioadl_addr =
9348                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9349                         ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
9350                                 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
9351                 } else {
9352                         ioarcb->write_ioadl_addr =
9353                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9354                         ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9355                         ioarcb->ioasa_host_pci_addr =
9356                                 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
9357                 }
9358                 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9359                 ipr_cmd->cmd_index = i;
9360                 ipr_cmd->ioa_cfg = ioa_cfg;
9361                 ipr_cmd->sense_buffer_dma = dma_addr +
9362                         offsetof(struct ipr_cmnd, sense_buffer);
9363
9364                 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9365                 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9366                 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9367                 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9368                         hrrq_id++;
9369         }
9370
9371         return 0;
9372 }
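
/*
 * Note on the command-id carve-up above: when multiple HRRQs are in
 * use, hrrq[0] is reserved for internal commands and the base command
 * blocks are split evenly across the remaining queues, with any
 * division remainder tacked onto the last queue. With purely
 * illustrative values (IPR_NUM_INTERNAL_CMD_BLKS = 5,
 * IPR_NUM_BASE_CMD_BLKS = 100, hrrq_num = 4, and IPR_NUM_CMD_BLKS
 * assumed to be their sum, 105) that yields ids 0-4, 5-37, 38-70 and
 * 71-103, and the leftover block from 100/3 bumps the last queue to
 * 71-104.
 */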
9373
9374 /**
9375  * ipr_alloc_mem - Allocate memory for an adapter
9376  * @ioa_cfg:    ioa config struct
9377  *
9378  * Return value:
9379  *      0 on success / non-zero for error
9380  **/
9381 static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9382 {
9383         struct pci_dev *pdev = ioa_cfg->pdev;
9384         int i, rc = -ENOMEM;
9385
9386         ENTER;
9387         ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9388                                        sizeof(struct ipr_resource_entry),
                                       GFP_KERNEL);
9389
9390         if (!ioa_cfg->res_entries)
9391                 goto out;
9392
9393         for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
9394                 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
9395                 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9396         }
9397
9398         ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9399                                               sizeof(struct ipr_misc_cbs),
9400                                               &ioa_cfg->vpd_cbs_dma,
9401                                               GFP_KERNEL);
9402
9403         if (!ioa_cfg->vpd_cbs)
9404                 goto out_free_res_entries;
9405
9406         if (ipr_alloc_cmd_blks(ioa_cfg))
9407                 goto out_free_vpd_cbs;
9408
9409         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9410                 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
9411                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9412                                         &ioa_cfg->hrrq[i].host_rrq_dma,
9413                                         GFP_KERNEL);
9414
9415                 if (!ioa_cfg->hrrq[i].host_rrq)  {
9416                         while (--i >= 0)
9417                                 dma_free_coherent(&pdev->dev,
9418                                         sizeof(u32) * ioa_cfg->hrrq[i].size,
9419                                         ioa_cfg->hrrq[i].host_rrq,
9420                                         ioa_cfg->hrrq[i].host_rrq_dma);
9421                         goto out_ipr_free_cmd_blocks;
9422                 }
9423                 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9424         }
9425
9426         ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9427                                                   ioa_cfg->cfg_table_size,
9428                                                   &ioa_cfg->cfg_table_dma,
9429                                                   GFP_KERNEL);
9430
9431         if (!ioa_cfg->u.cfg_table)
9432                 goto out_free_host_rrq;
9433
9434         for (i = 0; i < IPR_NUM_HCAMS; i++) {
9435                 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9436                                                          sizeof(struct ipr_hostrcb),
9437                                                          &ioa_cfg->hostrcb_dma[i],
9438                                                          GFP_KERNEL);
9439
9440                 if (!ioa_cfg->hostrcb[i])
9441                         goto out_free_hostrcb_dma;
9442
9443                 ioa_cfg->hostrcb[i]->hostrcb_dma =
9444                         ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9445                 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9446                 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9447         }
9448
9449         ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
9450                                  IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9451
9452         if (!ioa_cfg->trace)
9453                 goto out_free_hostrcb_dma;
9454
9455         rc = 0;
9456 out:
9457         LEAVE;
9458         return rc;
9459
9460 out_free_hostrcb_dma:
9461         while (i-- > 0) {
9462                 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9463                                   ioa_cfg->hostrcb[i],
9464                                   ioa_cfg->hostrcb_dma[i]);
9465         }
9466         dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9467                           ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9468 out_free_host_rrq:
9469         for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9470                 dma_free_coherent(&pdev->dev,
9471                                   sizeof(u32) * ioa_cfg->hrrq[i].size,
9472                                   ioa_cfg->hrrq[i].host_rrq,
9473                                   ioa_cfg->hrrq[i].host_rrq_dma);
9474         }
9475 out_ipr_free_cmd_blocks:
9476         ipr_free_cmd_blks(ioa_cfg);
9477 out_free_vpd_cbs:
9478         dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9479                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9480 out_free_res_entries:
9481         kfree(ioa_cfg->res_entries);
9482         goto out;
9483 }
9484
9485 /**
9486  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9487  * @ioa_cfg:    ioa config struct
9488  *
9489  * Return value:
9490  *      none
9491  **/
9492 static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9493 {
9494         int i;
9495
9496         for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9497                 ioa_cfg->bus_attr[i].bus = i;
9498                 ioa_cfg->bus_attr[i].qas_enabled = 0;
9499                 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9500                 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9501                         ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9502                 else
9503                         ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9504         }
9505 }
9506
9507 /**
9508  * ipr_init_regs - Initialize IOA registers
9509  * @ioa_cfg:    ioa config struct
9510  *
9511  * Return value:
9512  *      none
9513  **/
9514 static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9515 {
9516         const struct ipr_interrupt_offsets *p;
9517         struct ipr_interrupts *t;
9518         void __iomem *base;
9519
9520         p = &ioa_cfg->chip_cfg->regs;
9521         t = &ioa_cfg->regs;
9522         base = ioa_cfg->hdw_dma_regs;
9523
9524         t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9525         t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9526         t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9527         t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9528         t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9529         t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9530         t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9531         t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9532         t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9533         t->ioarrin_reg = base + p->ioarrin_reg;
9534         t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9535         t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9536         t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9537         t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9538         t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9539         t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9540
9541         if (ioa_cfg->sis64) {
9542                 t->init_feedback_reg = base + p->init_feedback_reg;
9543                 t->dump_addr_reg = base + p->dump_addr_reg;
9544                 t->dump_data_reg = base + p->dump_data_reg;
9545                 t->endian_swap_reg = base + p->endian_swap_reg;
9546         }
9547 }
9548
9549 /**
9550  * ipr_init_ioa_cfg - Initialize IOA config struct
9551  * @ioa_cfg:    ioa config struct
9552  * @host:               scsi host struct
9553  * @pdev:               PCI dev struct
9554  *
9555  * Return value:
9556  *      none
9557  **/
9558 static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9559                              struct Scsi_Host *host, struct pci_dev *pdev)
9560 {
9561         int i;
9562
9563         ioa_cfg->host = host;
9564         ioa_cfg->pdev = pdev;
9565         ioa_cfg->log_level = ipr_log_level;
9566         ioa_cfg->doorbell = IPR_DOORBELL;
9567         sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9568         sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9569         sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9570         sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9571         sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9572         sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9573
9574         INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9575         INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9576         INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9577         INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9578         INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9579         init_waitqueue_head(&ioa_cfg->reset_wait_q);
9580         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9581         init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9582         ioa_cfg->sdt_state = INACTIVE;
9583
9584         ipr_initialize_bus_attr(ioa_cfg);
9585         ioa_cfg->max_devs_supported = ipr_max_devs;
9586
9587         if (ioa_cfg->sis64) {
9588                 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9589                 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9590                 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9591                         ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9592                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9593                                            + ((sizeof(struct ipr_config_table_entry64)
9594                                                * ioa_cfg->max_devs_supported)));
9595         } else {
9596                 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9597                 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9598                 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9599                         ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9600                 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9601                                            + ((sizeof(struct ipr_config_table_entry)
9602                                                * ioa_cfg->max_devs_supported)));
9603         }
9604
9605         host->max_channel = IPR_VSET_BUS;
9606         host->unique_id = host->host_no;
9607         host->max_cmd_len = IPR_MAX_CDB_LEN;
9608         host->can_queue = ioa_cfg->max_cmds;
9609         pci_set_drvdata(pdev, ioa_cfg);
9610
9611         for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9612                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9613                 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9614                 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9615                 if (i == 0)
9616                         ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9617                 else
9618                         ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9619         }
9620 }
9621
9622 /**
9623  * ipr_get_chip_info - Find adapter chip information
9624  * @dev_id:             PCI device id struct
9625  *
9626  * Return value:
9627  *      ptr to chip information on success / NULL on failure
9628  **/
9629 static const struct ipr_chip_t *
9630 ipr_get_chip_info(const struct pci_device_id *dev_id)
9631 {
9632         int i;
9633
9634         for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9635                 if (ipr_chip[i].vendor == dev_id->vendor &&
9636                     ipr_chip[i].device == dev_id->device)
9637                         return &ipr_chip[i];
9638         return NULL;
9639 }
9640
9641 /**
9642  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9643  *                                              during probe time
9644  * @ioa_cfg:    ioa config struct
9645  *
9646  * Return value:
9647  *      None
9648  **/
9649 static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9650 {
9651         struct pci_dev *pdev = ioa_cfg->pdev;
9652
9653         if (pci_channel_offline(pdev)) {
9654                 wait_event_timeout(ioa_cfg->eeh_wait_q,
9655                                    !pci_channel_offline(pdev),
9656                                    IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9657                 pci_restore_state(pdev);
9658         }
9659 }
9660
9661 static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
9662 {
9663         struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
9664         int i, vectors;
9665
9666         for (i = 0; i < ARRAY_SIZE(entries); ++i)
9667                 entries[i].entry = i;
9668
9669         vectors = pci_enable_msix_range(ioa_cfg->pdev,
9670                                         entries, 1, ipr_number_of_msix);
9671         if (vectors < 0) {
9672                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9673                 return vectors;
9674         }
9675
9676         for (i = 0; i < vectors; i++)
9677                 ioa_cfg->vectors_info[i].vec = entries[i].vector;
9678         ioa_cfg->nvectors = vectors;
9679
9680         return 0;
9681 }
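
/*
 * Note: pci_enable_msix_range() allocates anywhere from 1 to
 * ipr_number_of_msix vectors and returns the number it actually got
 * (or a negative errno), so nvectors may legitimately end up smaller
 * than the module parameter requested.
 */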
9682
9683 static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
9684 {
9685         int i, vectors;
9686
9687         vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix);
9688         if (vectors < 0) {
9689                 ipr_wait_for_pci_err_recovery(ioa_cfg);
9690                 return vectors;
9691         }
9692
9693         for (i = 0; i < vectors; i++)
9694                 ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
9695         ioa_cfg->nvectors = vectors;
9696
9697         return 0;
9698 }
9699
9700 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9701 {
9702         int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9703
9704         for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9705                 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9706                          "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9707                 ioa_cfg->vectors_info[vec_idx].
9708                         desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9709         }
9710 }
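
/*
 * Note: this builds the per-vector names later handed to request_irq(),
 * e.g. "host2-0", "host2-1", ... for Scsi_Host number 2. The explicit
 * NUL store is belt-and-braces; snprintf() already terminates the
 * string.
 */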
9711
9712 static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
9713 {
9714         int i, rc;
9715
9716         for (i = 1; i < ioa_cfg->nvectors; i++) {
9717                 rc = request_irq(ioa_cfg->vectors_info[i].vec,
9718                         ipr_isr_mhrrq,
9719                         0,
9720                         ioa_cfg->vectors_info[i].desc,
9721                         &ioa_cfg->hrrq[i]);
9722                 if (rc) {
9723                         while (--i >= 0)
9724                                 free_irq(ioa_cfg->vectors_info[i].vec,
9725                                         &ioa_cfg->hrrq[i]);
9726                         return rc;
9727                 }
9728         }
9729         return 0;
9730 }
9731
9732 /**
9733  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9734  * @irq:                interrupt number
 * @devp:               pointer to ioa config struct (passed as void *)
9735  *
9736  * Description: Simply set the msi_received flag to 1 indicating that
9737  * Message Signaled Interrupts are supported.
9738  *
9739  * Return value:
9740  *      IRQ_HANDLED
9741  **/
9742 static irqreturn_t ipr_test_intr(int irq, void *devp)
9743 {
9744         struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9745         unsigned long lock_flags = 0;
9746         irqreturn_t rc = IRQ_HANDLED;
9747
9748         dev_info(&ioa_cfg->pdev->dev, "Received IRQ: %d\n", irq);
9749         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9750
9751         ioa_cfg->msi_received = 1;
9752         wake_up(&ioa_cfg->msi_wait_q);
9753
9754         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9755         return rc;
9756 }
9757
9758 /**
9759  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9760  * @pdev:               PCI device struct
9761  *
9762  * Description: The return value from pci_enable_msi_range() cannot always be
9763  * trusted.  This routine sets up and initiates a test interrupt to determine
9764  * if the interrupt is received via the ipr_test_intr() service routine.
9765  * If the tests fails, the driver will fall back to LSI.
9766  *
9767  * Return value:
9768  *      0 on success / non-zero on failure
9769  **/
9770 static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9771 {
9772         int rc;
9773         volatile u32 int_reg;
9774         unsigned long lock_flags = 0;
9775
9776         ENTER;
9777
9778         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9779         init_waitqueue_head(&ioa_cfg->msi_wait_q);
9780         ioa_cfg->msi_received = 0;
9781         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9782         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9783         int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9784         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9785
9786         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9787                 rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9788         else
9789                 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9790         if (rc) {
9791                 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
9792                 return rc;
9793         } else if (ipr_debug)
9794                 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
9795
9796         writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9797         int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
9798         wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9799         spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9800         ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9801
9802         if (!ioa_cfg->msi_received) {
9803                 /* MSI test failed */
9804                 dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9805                 rc = -EOPNOTSUPP;
9806         } else if (ipr_debug)
9807                 dev_info(&pdev->dev, "MSI test succeeded.\n");
9808
9809         spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9810
9811         if (ioa_cfg->intr_flag == IPR_USE_MSIX)
9812                 free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
9813         else
9814                 free_irq(pdev->irq, ioa_cfg);
9815
9816         LEAVE;
9817
9818         return rc;
9819 }
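
/*
 * Note: the test pokes IPR_PCII_IO_DEBUG_ACKNOWLEDGE into the
 * interrupt register to provoke a single interrupt, then waits up to
 * one second (HZ jiffies) for ipr_test_intr() to set msi_received.
 * Hardware/firmware combinations that advertise MSI but never deliver
 * one fail here, and the caller falls back to legacy line-based
 * interrupts.
 */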
9820
9821 /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9822  * @pdev:               PCI device struct
9823  * @dev_id:             PCI device id struct
9824  *
9825  * Return value:
9826  *      0 on success / non-zero on failure
9827  **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;

	ENTER;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

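	/*
	 * Select the interrupt mode in order of preference: MSI-X first,
	 * then MSI, then fall back to a single legacy (LSI) vector.
	 */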
	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msix(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSIX;
	else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
			ipr_enable_msi(ioa_cfg) == 0)
		ioa_cfg->intr_flag = IPR_USE_MSI;
	else {
		ioa_cfg->intr_flag = IPR_USE_LSI;
		ioa_cfg->nvectors = 1;
		dev_info(&pdev->dev, "Cannot enable MSI.\n");
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

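	/*
	 * Some platforms report successful MSI/MSI-X enablement but never
	 * deliver the interrupt, so verify delivery before committing to it.
	 */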
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		if (rc == -EOPNOTSUPP) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			if (ioa_cfg->intr_flag == IPR_USE_MSI) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSI;
				pci_disable_msi(pdev);
			} else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
				ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
				pci_disable_msix(pdev);
			}

			ioa_cfg->intr_flag = IPR_USE_LSI;
			ioa_cfg->nvectors = 1;
		} else if (rc)
			goto out_msi_disable;
		else {
			if (ioa_cfg->intr_flag == IPR_USE_MSI)
				dev_info(&pdev->dev,
					"Request for %d MSIs succeeded with starting IRQ: %d\n",
					ioa_cfg->nvectors, pdev->irq);
			else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
				dev_info(&pdev->dev,
					"Request for %d MSIXs succeeded.\n",
					ioa_cfg->nvectors);
		}
	}

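	/*
	 * Size the number of host RRQs to the smallest of the granted
	 * vectors, the online CPUs, and the driver maximum.
	 */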
	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	rc = ipr_save_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_set_pcix_cmd_reg(ioa_cfg);
	if (rc)
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

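	/*
	 * Register the run time interrupt handlers: one IRQ per MSI/MSI-X
	 * vector, or a single shared legacy IRQ.
	 */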
	if (ioa_cfg->intr_flag == IPR_USE_MSI ||
	    ioa_cfg->intr_flag == IPR_USE_MSIX) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr, 0,
				 ioa_cfg->vectors_info[0].desc,
				 &ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg);
	} else {
		rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED,
				 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	LEAVE;
out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	if (ioa_cfg->intr_flag == IPR_USE_MSI)
		pci_disable_msi(pdev);
	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
		pci_disable_msix(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}

/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:            ioa config struct
 * @shutdown_type:      shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *      none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	ENTER;
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	LEAVE;
}

/**
 * __ipr_remove - Remove a single adapter
 * @pdev:       pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *      none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;
	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

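	/* Flag each HRRQ as being removed so no new commands are queued on it */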
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();
	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);

	LEAVE;
}

/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:       pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *      none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ENTER;

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);

	LEAVE;
}

/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:       pci device struct
 * @dev_id:     pci device id struct
 *
 * Return value:
 *      0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				  &ipr_dump_attr);

	if (rc) {
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	scsi_scan_host(ioa_cfg->host);
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

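	/*
	 * On SIS64 adapters with multiple vectors, service the secondary
	 * HRRQs via blk_iopoll; HRRQ 0 remains interrupt driven.
	 */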
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
		}
	}

	schedule_work(&ioa_cfg->work_q);
	return 0;
}

/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:       pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *      none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

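	/*
	 * On a fast reboot of a SIS64 adapter, a quiesce shutdown is used
	 * in place of the normal cache flushing shutdown.
	 */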
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}

static const struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:    ipr command struct
 *
 * Return value:
 *      none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:         Notifier block
 * @event:      Notifier event
 * @buf:        Notifier data (unused)
 *
 * Return value:
 *      NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

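		/* Build and issue a shutdown prepare command to the adapter */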
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Registers the reboot notifier and the PCI driver.
 *
 * Return value:
 *      0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *      none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);