/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

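/**
 * qla2x00_vp_stop_timer() - Stop a virtual port's timer.
 * @vha: host whose timer should be stopped.
 *
 * Synchronously deletes the vport timer if it is running. A no-op on
 * the physical port (vp_idx 0).
 */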
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}

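/**
 * qla24xx_allocate_vp_id() - Allocate a vp_idx for a new virtual port.
 * @vha: the vport being created.
 *
 * Claims the first free slot in the HBA-wide vp_idx bitmap, bumps the
 * vhost count, and adds the vport to ha->vp_list under vport_slock so
 * concurrent list walkers never see a half-initialized entry.
 *
 * Return: the allocated vp_id, or a value greater than
 * ha->max_npiv_vports if no slot is free.
 */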
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id. */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);

        qlt_update_vp_map(vha, SET_VP_IDX);

        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}

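/**
 * qla24xx_deallocate_vp_id() - Release a vport's vp_idx and unlink it.
 * @vha: the vport being torn down.
 *
 * Spins (in 500 ms steps) until the vport's reference count drops to
 * zero, then removes it from ha->vp_list, clears its target-mode map
 * entry, and returns its vp_idx slot to the bitmap.
 */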
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing the
         * vport from the list. The lock must be held for safe removal
         * (it ensures no active vp_list traversal while the vport is
         * being removed).
         */
        spin_lock_irqsave(&ha->vport_slock, flags);
        while (atomic_read(&vha->vref_count)) {
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                msleep(500);

                spin_lock_irqsave(&ha->vport_slock, flags);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}

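/**
 * qla24xx_find_vhost_by_name() - Look up a vport by its WWPN.
 * @ha: HBA whose vport list is searched.
 * @port_name: WWPN (WWN_SIZE bytes) to match.
 *
 * Walks ha->vp_list under vport_slock. The returned vport is not
 * reference-counted; callers apparently rely on vport-create
 * serialization to keep it alive.
 *
 * Return: the matching vport, or NULL if none is found.
 */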
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *      Updates fcport state when a vport goes offline.
 *
 * Input:
 *      vha = virtual host adapter block pointer.
 *
 * Return:
 *      None.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in a context other than vport
         * create, disable, or delete, make sure it is synchronized with
         * the vport delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}

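/**
 * qla24xx_disable_vp() - Take a virtual port offline.
 * @vha: the vport to disable.
 *
 * Logs out the vport via the VP-control mailbox command, marks its
 * loop down, removes its port id from the target-mode AL_PA map, and
 * marks all of its fcports lost.
 *
 * Return: 0 on success, -1 if the VP-control command failed.
 */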
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->vport_slock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}

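/**
 * qla24xx_enable_vp() - Bring a virtual port online.
 * @vha: the vport to enable.
 *
 * Fails unless the physical port is up and the topology is F-port
 * (NPIV requires a fabric); otherwise pushes the vport configuration
 * to the firmware via qla24xx_modify_vp_config().
 *
 * Return: 0 on success, 1 on failure.
 */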
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
                atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
                !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}

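/**
 * qla24xx_configure_vp() - Finish configuring a newly acquired vport.
 * @vha: the vport to configure.
 *
 * Sends change request #3 so the firmware reports RSCNs to this vport,
 * clears VP_SCR_NEEDED once state-change registration is effective,
 * and marks the vport active if qla24xx_configure_vhba() succeeds.
 */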
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        }
        /* Corresponds to SCR enabled */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

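/**
 * qla2x00_alert_all_vps() - Fan an async event out to all vports.
 * @rsp: response queue the event arrived on.
 * @mb: mailbox register snapshot describing the event.
 *
 * Walks ha->vp_list and replays link-level async events (LIP, loop
 * up/down, RSCN, port update, etc.) into each vport. The vref_count
 * is held across the callback so the vport cannot be deleted while
 * the event is being processed.
 */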
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

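/**
 * qla2x00_vp_abort_isp() - Vport-side handling of an ISP abort.
 * @vha: the vport being recovered.
 *
 * The physical port does most of the abort and recovery work, so the
 * vport is simply treated as a loop-down: devices are marked lost, the
 * vport is logged out (unless an ISP reset already did that), and it
 * is then re-enabled.
 *
 * Return: the result of qla24xx_enable_vp().
 */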
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset the vport, we need to log it out first.
         * Note: this control_vp can fail if an ISP reset has already been
         * issued; that is expected, as the vport will already have been
         * logged out by the reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
        return qla24xx_enable_vp(vha);
}

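/**
 * qla2x00_do_dpc_vp() - Per-vport DPC (deferred procedure call) work.
 * @vha: the vport to service.
 *
 * Drains queued work, finishes port configuration once the firmware
 * has acknowledged the VP (VP_CONFIG_OK/VP_IDX_ACQUIRED), and handles
 * fcport updates, relogins, reset markers, and loop resync as flagged
 * in dpc_flags.
 *
 * Return: always 0.
 */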
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        qla2x00_do_work(vha);

        /* Check if the firmware is ready to configure the VP first. */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. Complete port configuration. */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
                !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
                atomic_read(&vha->loop_state) != LOOP_DOWN) {

                ql_dbg(ql_dbg_dpc, vha, 0x4018,
                    "Relogin needed scheduled.\n");
                qla2x00_relogin(vha);
                ql_dbg(ql_dbg_dpc, vha, 0x4019,
                    "Relogin needed end.\n");
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}

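/**
 * qla2x00_do_dpc_all_vps() - Run DPC work for every vport on an HBA.
 * @vha: the physical (base) host; calls from vports are ignored.
 *
 * Called from the base host's DPC thread. Takes a vref on each vport
 * so it cannot be deleted while qla2x00_do_dpc_vp() runs outside
 * vport_slock. Skipped entirely when not on an F-port topology.
 */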
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}

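/**
 * qla24xx_vport_create_req_sanity_check() - Validate a vport-create request.
 * @fc_vport: FC transport descriptor of the requested vport.
 *
 * Rejects the request unless the role is FCP initiator, the firmware
 * and hardware support NPIV, the fabric switch advertises FLOGI
 * multiple-ID support, the WWPN is unique, and a vport slot is still
 * available.
 *
 * Return: 0 if the vport may be created, otherwise a VPCERR_* code.
 */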
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV. */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present. */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check that the WWPN is unique. */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the max-npiv-supports limit. */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, base_vha, 0xa004,
                    "num_vhosts %u is bigger "
                    "than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}

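/**
 * qla24xx_create_vhost() - Allocate and initialize a new virtual host.
 * @fc_vport: FC transport descriptor the vport is created for.
 *
 * Creates a Scsi_Host for the vport, copies in the WWNN/WWPN, claims a
 * vp_idx, starts the vport timer, and inherits queue and command-size
 * limits from the base host. The vport shares the base host's request
 * queue.
 *
 * Return: the new vport, or NULL on failure.
 */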
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}

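/**
 * qla25xx_free_req_que() - Free an additional (multiqueue) request queue.
 * @vha: host the queue belongs to.
 * @req: the request queue to tear down.
 *
 * Frees the DMA ring and the outstanding-command array, removes the
 * queue from ha->req_q_map, and returns its id to req_qid_map.
 */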
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
                sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
        req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
                sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}

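/**
 * qla25xx_delete_req_que() - Disable and free a request queue.
 * @vha: host the queue belongs to.
 * @req: the queue to delete; may be NULL.
 *
 * Sets BIT_0 in the queue options (which appears to request queue
 * teardown in the init-queue mailbox command) and re-issues
 * qla25xx_init_req_que(); the queue memory is freed only if the
 * firmware reports success.
 *
 * Return: QLA_SUCCESS on success, -1 if @req is NULL, or a mailbox
 * error status.
 */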
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req && test_bit(cnt, ha->req_qid_map)) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00ea,
                                    "Couldn't delete req que %d.\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00eb,
                                    "Couldn't delete rsp que %d.\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}

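/**
 * qla25xx_create_req_que() - Create an additional (multiqueue) request queue.
 * @ha: HBA to add the queue to.
 * @options: queue option bits; BIT_4/BIT_5 are derived from @rid.
 * @vp_idx: vport index the queue serves.
 * @rid: queue resource id (alternate PCI bus/devfn encoding).
 * @rsp_que: id of the response queue to pair with, or negative for none.
 * @qos: QoS value for the queue.
 *
 * Allocates the queue structure and DMA ring, claims a qid from
 * req_qid_map, wires up the ISP25xx multiqueue registers, and asks the
 * firmware to initialize the queue.
 *
 * Return: the new queue id, or 0 on failure.
 */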
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (req->length + 1) * sizeof(request_t),
                        &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);
        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}

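/**
 * qla_do_work() - Workqueue handler for a multiqueue response queue.
 * @work: embedded work_struct of the rsp_que.
 *
 * Processes completions on the response queue under hardware_lock;
 * presumably queued from the interrupt path when the driver workqueue
 * (rsp->hw->wq) is in use.
 */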
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (rsp->length + 1) * sizeof(response_t),
                        &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                ql_log(ql_log_warn, base_vha, 0x00e3,
                    "MSIX not enabled.\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode on adapters that are not MSI-X
         * NACK capable. */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p.\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        if (req >= 0)
                rsp->req = ha->req_q_map[req];
        else
                rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}