1 /* bnx2fc_hwi.c: QLogic NetXtreme II Linux FCoE offload driver.
2  * This file contains the code for low level functions that interact
3  * with 57712 FCoE firmware.
4  *
5  * Copyright (c) 2008 - 2013 Broadcom Corporation
6  * Copyright (c) 2014, QLogic Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation.
11  *
12  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
13  */
14
15 #include "bnx2fc.h"
16
17 DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
18
19 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
20                                         struct fcoe_kcqe *new_cqe_kcqe);
21 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
22                                         struct fcoe_kcqe *ofld_kcqe);
23 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
24                                                 struct fcoe_kcqe *ofld_kcqe);
25 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
26 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
27                                         struct fcoe_kcqe *destroy_kcqe);
28
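/**
 * bnx2fc_send_stat_req - post a STAT KWQE to the FCoE firmware
 *
 * @hba:        adapter structure pointer
 *
 * Asks the firmware to DMA its FCoE statistics into the buffer at
 * hba->stats_buf_dma. Completion is reported via the
 * FCOE_KCQE_OPCODE_STAT_FUNC KCQE handled in bnx2fc_indicate_kcqe().
 */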
29 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
30 {
31         struct fcoe_kwqe_stat stat_req;
32         struct kwqe *kwqe_arr[2];
33         int num_kwqes = 1;
34         int rc = 0;
35
36         memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
37         stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
38         stat_req.hdr.flags =
39                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
40
41         stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
42         stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
43
44         kwqe_arr[0] = (struct kwqe *) &stat_req;
45
46         if (hba->cnic && hba->cnic->submit_kwqes)
47                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
48
49         return rc;
50 }
51
52 /**
53  * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
54  *
55  * @hba:        adapter structure pointer
56  *
57  * Send down the FCoE firmware init KWQEs which initiate the initial handshake
58  *      with the f/w.
59  *
60  */
61 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
62 {
63         struct fcoe_kwqe_init1 fcoe_init1;
64         struct fcoe_kwqe_init2 fcoe_init2;
65         struct fcoe_kwqe_init3 fcoe_init3;
66         struct kwqe *kwqe_arr[3];
67         int num_kwqes = 3;
68         int rc = 0;
69
70         if (!hba->cnic) {
71                 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
72                 return -ENODEV;
73         }
74
75         /* fill init1 KWQE */
76         memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
77         fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
78         fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
79                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
80
81         fcoe_init1.num_tasks = hba->max_tasks;
82         fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
83         fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
84         fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
85         fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
86         fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
87         fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
88         fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
89         fcoe_init1.task_list_pbl_addr_hi =
90                                 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
91         fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
92
93         fcoe_init1.flags = (PAGE_SHIFT <<
94                                 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
95
96         fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
97
98         /* fill init2 KWQE */
99         memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
100         fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
101         fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
102                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
103
104         fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
105         fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
106
107
108         fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
109         fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
110                                            ((u64) hba->hash_tbl_pbl_dma >> 32);
111
112         fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
113         fcoe_init2.t2_hash_tbl_addr_hi = (u32)
114                                           ((u64) hba->t2_hash_tbl_dma >> 32);
115
116         fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
117         fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
118                                         ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
119
120         fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
121
122         /* fill init3 KWQE */
123         memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
124         fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
125         fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
126                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
127         fcoe_init3.error_bit_map_lo = 0xffffffff;
128         fcoe_init3.error_bit_map_hi = 0xffffffff;
129
130         /*
131          * enable both cached connection and cached tasks
132          * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
133          */
134         fcoe_init3.perf_config = 3;
135
136         kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
137         kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
138         kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
139
140         if (hba->cnic && hba->cnic->submit_kwqes)
141                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
142
143         return rc;
144 }
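
/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tear down the FCoE firmware function
 *
 * @hba:        adapter structure pointer
 *
 * Sends the DESTROY KWQE; the firmware acknowledges it with a
 * FCOE_KCQE_OPCODE_DESTROY_FUNC KCQE.
 */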
145 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
146 {
147         struct fcoe_kwqe_destroy fcoe_destroy;
148         struct kwqe *kwqe_arr[2];
149         int num_kwqes = 1;
150         int rc = -1;
151
152         /* fill destroy KWQE */
153         memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
154         fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
155         fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
156                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
157         kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
158
159         if (hba->cnic && hba->cnic->submit_kwqes)
160                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
161         return rc;
162 }
163
164 /**
165  * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
166  *
167  * @port:               port structure pointer
168  * @tgt:                bnx2fc_rport structure pointer
169  */
170 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
171                                         struct bnx2fc_rport *tgt)
172 {
173         struct fc_lport *lport = port->lport;
174         struct bnx2fc_interface *interface = port->priv;
175         struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
176         struct bnx2fc_hba *hba = interface->hba;
177         struct kwqe *kwqe_arr[4];
178         struct fcoe_kwqe_conn_offload1 ofld_req1;
179         struct fcoe_kwqe_conn_offload2 ofld_req2;
180         struct fcoe_kwqe_conn_offload3 ofld_req3;
181         struct fcoe_kwqe_conn_offload4 ofld_req4;
182         struct fc_rport_priv *rdata = tgt->rdata;
183         struct fc_rport *rport = tgt->rport;
184         int num_kwqes = 4;
185         u32 port_id;
186         int rc = 0;
187         u16 conn_id;
188
189         /* Initialize offload request 1 structure */
190         memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
191
192         ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
193         ofld_req1.hdr.flags =
194                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
195
196
197         conn_id = (u16)tgt->fcoe_conn_id;
198         ofld_req1.fcoe_conn_id = conn_id;
199
200
201         ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
202         ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
203
204         ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
205         ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
206
207         ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
208         ofld_req1.rq_first_pbe_addr_hi =
209                                 (u32)((u64) tgt->rq_dma >> 32);
210
211         ofld_req1.rq_prod = 0x8000;
212
213         /* Initialize offload request 2 structure */
214         memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
215
216         ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
217         ofld_req2.hdr.flags =
218                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
219
220         ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
221
222         ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
223         ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
224
225         ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
226         ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
227
228         ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
229         ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
230
231         /* Initialize offload request 3 structure */
232         memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
233
234         ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
235         ofld_req3.hdr.flags =
236                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
237
238         ofld_req3.vlan_tag = interface->vlan_id <<
239                                 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
240         ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
241
242         port_id = fc_host_port_id(lport->host);
243         if (port_id == 0) {
244                 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
245                 return -EINVAL;
246         }
247
248         /*
249          * Store the s_id of the initiator for later reference. It is
250          * needed by the disable/destroy path during link-down processing,
251          * because when the lport is reset its port_id is also reset to 0.
252          */
253         tgt->sid = port_id;
254         ofld_req3.s_id[0] = (port_id & 0x000000FF);
255         ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
256         ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
257
258         port_id = rport->port_id;
259         ofld_req3.d_id[0] = (port_id & 0x000000FF);
260         ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
261         ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
262
263         ofld_req3.tx_total_conc_seqs = rdata->max_seq;
264
265         ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
266         ofld_req3.rx_max_fc_pay_len  = lport->mfs;
267
268         ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
269         ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
270         ofld_req3.rx_open_seqs_exch_c3 = 1;
271
272         ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
273         ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
274
275         /* set mul_n_port_ids supported flag to 0, until it is supported */
276         ofld_req3.flags = 0;
277         /*
278         ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
279                             FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
280         */
281         /* Info from PLOGI response */
282         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
283                              FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
284
285         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
286                              FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
287
288         /*
289          * Info from PRLI response, this info is used for sequence level error
290          * recovery support
291          */
292         if (tgt->dev_type == TYPE_TAPE) {
293                 ofld_req3.flags |= 1 <<
294                                     FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
295                 ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
296                                     ? 1 : 0) <<
297                                     FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
298         }
299
300         /* vlan flag */
301         ofld_req3.flags |= (interface->vlan_enabled <<
302                             FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
303
304         /* C2_VALID and ACK flags are not set as they are not supported */
305
306
307         /* Initialize offload request 4 structure */
308         memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
309         ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
310         ofld_req4.hdr.flags =
311                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
312
313         ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
314
315
316         ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
317                                                         /* local mac */
318         ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
319         ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
320         ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
321         ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
322         ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
323         ofld_req4.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
324                                                         /* fcf mac */
325         ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
326         ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
327         ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
328         ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
329         ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
330
331         ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
332         ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
333
334         ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
335         ofld_req4.confq_pbl_base_addr_hi =
336                                         (u32)((u64) tgt->confq_pbl_dma >> 32);
337
338         kwqe_arr[0] = (struct kwqe *) &ofld_req1;
339         kwqe_arr[1] = (struct kwqe *) &ofld_req2;
340         kwqe_arr[2] = (struct kwqe *) &ofld_req3;
341         kwqe_arr[3] = (struct kwqe *) &ofld_req4;
342
343         if (hba->cnic && hba->cnic->submit_kwqes)
344                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
345
346         return rc;
347 }
348
349 /**
350  * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
351  *
352  * @port:               port structure pointer
353  * @tgt:                bnx2fc_rport structure pointer
354  */
355 int bnx2fc_send_session_enable_req(struct fcoe_port *port,
356                                         struct bnx2fc_rport *tgt)
357 {
358         struct kwqe *kwqe_arr[2];
359         struct bnx2fc_interface *interface = port->priv;
360         struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
361         struct bnx2fc_hba *hba = interface->hba;
362         struct fcoe_kwqe_conn_enable_disable enbl_req;
363         struct fc_lport *lport = port->lport;
364         struct fc_rport *rport = tgt->rport;
365         int num_kwqes = 1;
366         int rc = 0;
367         u32 port_id;
368
369         memset(&enbl_req, 0x00,
370                sizeof(struct fcoe_kwqe_conn_enable_disable));
371         enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
372         enbl_req.hdr.flags =
373                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
374
375         enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
376                                                         /* local mac */
377         enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
378         enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
379         enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
380         enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
381         enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
382         memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
383
384         enbl_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
385         enbl_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
386         enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
387         enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
388         enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
389         enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
390
391         port_id = fc_host_port_id(lport->host);
392         if (port_id != tgt->sid) {
393                 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
394                                 "sid = 0x%x\n", port_id, tgt->sid);
395                 port_id = tgt->sid;
396         }
397         enbl_req.s_id[0] = (port_id & 0x000000FF);
398         enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
399         enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
400
401         port_id = rport->port_id;
402         enbl_req.d_id[0] = (port_id & 0x000000FF);
403         enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
404         enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
405         enbl_req.vlan_tag = interface->vlan_id <<
406                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
407         enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
408         enbl_req.vlan_flag = interface->vlan_enabled;
409         enbl_req.context_id = tgt->context_id;
410         enbl_req.conn_id = tgt->fcoe_conn_id;
411
412         kwqe_arr[0] = (struct kwqe *) &enbl_req;
413
414         if (hba->cnic && hba->cnic->submit_kwqes)
415                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
416         return rc;
417 }
418
419 /**
420  * bnx2fc_send_session_disable_req - initiates FCoE Session disable
421  *
422  * @port:               port structure pointer
423  * @tgt:                bnx2fc_rport structure pointer
424  */
425 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
426                                     struct bnx2fc_rport *tgt)
427 {
428         struct bnx2fc_interface *interface = port->priv;
429         struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
430         struct bnx2fc_hba *hba = interface->hba;
431         struct fcoe_kwqe_conn_enable_disable disable_req;
432         struct kwqe *kwqe_arr[2];
433         struct fc_rport *rport = tgt->rport;
434         int num_kwqes = 1;
435         int rc = 0;
436         u32 port_id;
437
438         memset(&disable_req, 0x00,
439                sizeof(struct fcoe_kwqe_conn_enable_disable));
440         disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
441         disable_req.hdr.flags =
442                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
443
444         disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
445         disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
446         disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
447         disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
448         disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
449         disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
450
451         disable_req.dst_mac_addr_lo[0] =  ctlr->dest_addr[5];
452         disable_req.dst_mac_addr_lo[1] =  ctlr->dest_addr[4];
453         disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
454         disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
455         disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
456         disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
457
458         port_id = tgt->sid;
459         disable_req.s_id[0] = (port_id & 0x000000FF);
460         disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
461         disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
462
463
464         port_id = rport->port_id;
465         disable_req.d_id[0] = (port_id & 0x000000FF);
466         disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
467         disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
468         disable_req.context_id = tgt->context_id;
469         disable_req.conn_id = tgt->fcoe_conn_id;
470         disable_req.vlan_tag = interface->vlan_id <<
471                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
472         disable_req.vlan_tag |=
473                         3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
474         disable_req.vlan_flag = interface->vlan_enabled;
475
476         kwqe_arr[0] = (struct kwqe *) &disable_req;
477
478         if (hba->cnic && hba->cnic->submit_kwqes)
479                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
480
481         return rc;
482 }
483
484 /**
485  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
486  *
487  * @hba:                adapter structure pointer
488  * @tgt:                bnx2fc_rport structure pointer
489  */
490 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
491                                         struct bnx2fc_rport *tgt)
492 {
493         struct fcoe_kwqe_conn_destroy destroy_req;
494         struct kwqe *kwqe_arr[2];
495         int num_kwqes = 1;
496         int rc = 0;
497
498         memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
499         destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
500         destroy_req.hdr.flags =
501                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
502
503         destroy_req.context_id = tgt->context_id;
504         destroy_req.conn_id = tgt->fcoe_conn_id;
505
506         kwqe_arr[0] = (struct kwqe *) &destroy_req;
507
508         if (hba->cnic && hba->cnic->submit_kwqes)
509                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
510
511         return rc;
512 }
513
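/*
 * is_valid_lport - check that @lport is still on @hba's vport list
 *
 * Used by bnx2fc_unsol_els_work() to validate the lport (under hba_lock)
 * before handing an unsolicited frame to libfc via fc_exch_recv().
 */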
514 static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
515 {
516         struct bnx2fc_lport *blport;
517
518         spin_lock_bh(&hba->hba_lock);
519         list_for_each_entry(blport, &hba->vports, list) {
520                 if (blport->lport == lport) {
521                         spin_unlock_bh(&hba->hba_lock);
522                         return true;
523                 }
524         }
525         spin_unlock_bh(&hba->hba_lock);
526         return false;
527
528 }
529
530
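/*
 * bnx2fc_unsol_els_work - deferred delivery of an unsolicited frame
 *
 * Runs from bnx2fc_wq; passes the frame to libfc via fc_exch_recv()
 * only if the lport is still registered with the hba.
 */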
531 static void bnx2fc_unsol_els_work(struct work_struct *work)
532 {
533         struct bnx2fc_unsol_els *unsol_els;
534         struct fc_lport *lport;
535         struct bnx2fc_hba *hba;
536         struct fc_frame *fp;
537
538         unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
539         lport = unsol_els->lport;
540         fp = unsol_els->fp;
541         hba = unsol_els->hba;
542         if (is_valid_lport(hba, lport))
543                 fc_exch_recv(lport, fp);
544         kfree(unsol_els);
545 }
546
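/**
 * bnx2fc_process_l2_frame_compl - deliver an unsolicited L2 frame to libfc
 *
 * @tgt:        bnx2fc_rport the frame was received on
 * @buf:        raw frame (FC header followed by payload)
 * @frame_len:  total frame length in bytes
 * @l2_oxid:    OX_ID to patch into the header, or FC_XID_UNKNOWN
 *
 * Copies the frame into a newly allocated fc_frame, drops ELS requests
 * that need no reply, and queues a work item on bnx2fc_wq so that
 * fc_exch_recv() runs from workqueue context.
 */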
547 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
548                                    unsigned char *buf,
549                                    u32 frame_len, u16 l2_oxid)
550 {
551         struct fcoe_port *port = tgt->port;
552         struct fc_lport *lport = port->lport;
553         struct bnx2fc_interface *interface = port->priv;
554         struct bnx2fc_unsol_els *unsol_els;
555         struct fc_frame_header *fh;
556         struct fc_frame *fp;
557         struct sk_buff *skb;
558         u32 payload_len;
559         u32 crc;
560         u8 op;
561
562
563         unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
564         if (!unsol_els) {
565                 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
566                 return;
567         }
568
569         BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
570                 l2_oxid, frame_len);
571
572         payload_len = frame_len - sizeof(struct fc_frame_header);
573
574         fp = fc_frame_alloc(lport, payload_len);
575         if (!fp) {
576                 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
577                 kfree(unsol_els);
578                 return;
579         }
580
581         fh = (struct fc_frame_header *) fc_frame_header_get(fp);
582         /* Copy FC Frame header and payload into the frame */
583         memcpy(fh, buf, frame_len);
584
585         if (l2_oxid != FC_XID_UNKNOWN)
586                 fh->fh_ox_id = htons(l2_oxid);
587
588         skb = fp_skb(fp);
589
590         if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
591             (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
592
593                 if (fh->fh_type == FC_TYPE_ELS) {
594                         op = fc_frame_payload_op(fp);
595                         if ((op == ELS_TEST) || (op == ELS_ESTC) ||
596                             (op == ELS_FAN) || (op == ELS_CSU)) {
597                                 /*
598                                  * No need to reply for these
599                                  * ELS requests
600                                  */
601                                 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
602                                 kfree_skb(skb);
603                                 kfree(unsol_els);
604                                 return;
605                         }
606                 }
607                 crc = fcoe_fc_crc(fp);
608                 fc_frame_init(fp);
609                 fr_dev(fp) = lport;
610                 fr_sof(fp) = FC_SOF_I3;
611                 fr_eof(fp) = FC_EOF_T;
612                 fr_crc(fp) = cpu_to_le32(~crc);
613                 unsol_els->lport = lport;
614                 unsol_els->hba = interface->hba;
615                 unsol_els->fp = fp;
616                 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
617                 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
618         } else {
619                 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
620                 kfree_skb(skb);
621                 kfree(unsol_els);
622         }
623 }
624
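/*
 * bnx2fc_process_unsol_compl - handle an unsolicited CQE
 *
 * Three subtypes are handled: unsolicited L2 frames (passed up to libfc),
 * error detection entries (an ABTS is initiated for the affected SCSI
 * command) and warning detection entries (recorded in the affected io_req
 * and logged). Each error/warning entry consumes a single RQ buffer.
 */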
625 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
626 {
627         u8 num_rq;
628         struct fcoe_err_report_entry *err_entry;
629         unsigned char *rq_data;
630         unsigned char *buf = NULL, *buf1;
631         int i;
632         u16 xid;
633         u32 frame_len, len;
634         struct bnx2fc_cmd *io_req = NULL;
635         struct fcoe_task_ctx_entry *task, *task_page;
636         struct bnx2fc_interface *interface = tgt->port->priv;
637         struct bnx2fc_hba *hba = interface->hba;
638         int task_idx, index;
639         int rc = 0;
640         u64 err_warn_bit_map;
641         u8 err_warn = 0xff;
642
643
644         BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
645         switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
646         case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
647                 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
648                              FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
649
650                 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
651
652                 spin_lock_bh(&tgt->tgt_lock);
653                 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
654                 spin_unlock_bh(&tgt->tgt_lock);
655
656                 if (rq_data) {
657                         buf = rq_data;
658                 } else {
659                         buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
660                                               GFP_ATOMIC);
661
662                         if (!buf1) {
663                                 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
664                                 break;
665                         }
666
667                         for (i = 0; i < num_rq; i++) {
668                                 spin_lock_bh(&tgt->tgt_lock);
669                                 rq_data = (unsigned char *)
670                                            bnx2fc_get_next_rqe(tgt, 1);
671                                 spin_unlock_bh(&tgt->tgt_lock);
672                                 len = BNX2FC_RQ_BUF_SZ;
673                                 memcpy(buf1, rq_data, len);
674                                 buf1 += len;
675                         }
676                 }
677                 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
678                                               FC_XID_UNKNOWN);
679
680                 if (buf != rq_data)
681                         kfree(buf);
682                 spin_lock_bh(&tgt->tgt_lock);
683                 bnx2fc_return_rqe(tgt, num_rq);
684                 spin_unlock_bh(&tgt->tgt_lock);
685                 break;
686
687         case FCOE_ERROR_DETECTION_CQE_TYPE:
688                 /*
689                  * In case of error reporting CQE a single RQ entry
690                  * is consumed.
691                  */
692                 spin_lock_bh(&tgt->tgt_lock);
693                 num_rq = 1;
694                 err_entry = (struct fcoe_err_report_entry *)
695                              bnx2fc_get_next_rqe(tgt, 1);
696                 xid = err_entry->fc_hdr.ox_id;
697                 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
698                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
699                         err_entry->data.err_warn_bitmap_hi,
700                         err_entry->data.err_warn_bitmap_lo);
701                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
702                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
703
704
705                 if (xid > hba->max_xid) {
706                         BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
707                                    xid);
708                         goto ret_err_rqe;
709                 }
710
711                 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
712                 index = xid % BNX2FC_TASKS_PER_PAGE;
713                 task_page = (struct fcoe_task_ctx_entry *)
714                                         hba->task_ctx[task_idx];
715                 task = &(task_page[index]);
716
717                 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
718                 if (!io_req)
719                         goto ret_err_rqe;
720
721                 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
722                         printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
723                         goto ret_err_rqe;
724                 }
725
726                 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
727                                        &io_req->req_flags)) {
728                         BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
729                                             "progress.. ignore unsol err\n");
730                         goto ret_err_rqe;
731                 }
732
733                 err_warn_bit_map = (u64)
734                         ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
735                         (u64)err_entry->data.err_warn_bitmap_lo;
736                 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
737                         if (err_warn_bit_map & (u64)((u64)1 << i)) {
738                                 err_warn = i;
739                                 break;
740                         }
741                 }
742
743                 /*
744                  * If ABTS is already in progress, and FW error is
745                  * received after that, do not cancel the timeout_work
746                  * and let the error recovery continue by explicitly
747                  * logging out the target, when the ABTS eventually
748                  * times out.
749                  */
750                 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
751                         printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
752                                             "in ABTS processing\n", xid);
753                         goto ret_err_rqe;
754                 }
755                 BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
756                 if (tgt->dev_type != TYPE_TAPE)
757                         goto skip_rec;
758                 switch (err_warn) {
759                 case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
760                 case FCOE_ERROR_CODE_DATA_OOO_RO:
761                 case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
762                 case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
763                 case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
764                 case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
765                         BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
766                                    xid);
767                         memcpy(&io_req->err_entry, err_entry,
768                                sizeof(struct fcoe_err_report_entry));
769                         if (!test_bit(BNX2FC_FLAG_SRR_SENT,
770                                       &io_req->req_flags)) {
771                                 spin_unlock_bh(&tgt->tgt_lock);
772                                 rc = bnx2fc_send_rec(io_req);
773                                 spin_lock_bh(&tgt->tgt_lock);
774
775                                 if (rc)
776                                         goto skip_rec;
777                         } else
778                                 printk(KERN_ERR PFX "SRR in progress\n");
779                         goto ret_err_rqe;
780                         break;
781                 default:
782                         break;
783                 }
784
785 skip_rec:
786                 set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
787                 /*
788                  * Cancel the timeout_work, as we received IO
789                  * completion with FW error.
790                  */
791                 if (cancel_delayed_work(&io_req->timeout_work))
792                         kref_put(&io_req->refcount, bnx2fc_cmd_release);
793
794                 rc = bnx2fc_initiate_abts(io_req);
795                 if (rc != SUCCESS) {
796                         printk(KERN_ERR PFX "err_warn: initiate_abts "
797                                 "failed xid = 0x%x. issue cleanup\n",
798                                 io_req->xid);
799                         bnx2fc_initiate_cleanup(io_req);
800                 }
801 ret_err_rqe:
802                 bnx2fc_return_rqe(tgt, 1);
803                 spin_unlock_bh(&tgt->tgt_lock);
804                 break;
805
806         case FCOE_WARNING_DETECTION_CQE_TYPE:
807                 /*
808                  * In case of warning reporting CQE a single RQ entry
809                  * is consumed.
810                  */
811                 spin_lock_bh(&tgt->tgt_lock);
812                 num_rq = 1;
813                 err_entry = (struct fcoe_err_report_entry *)
814                              bnx2fc_get_next_rqe(tgt, 1);
815                 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
816                 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
817                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
818                         err_entry->data.err_warn_bitmap_hi,
819                         err_entry->data.err_warn_bitmap_lo);
820                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
821                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
822
823                 if (xid > hba->max_xid) {
824                         BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
825                         goto ret_warn_rqe;
826                 }
827
828                 err_warn_bit_map = (u64)
829                         ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
830                         (u64)err_entry->data.err_warn_bitmap_lo;
831                 for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
832                         if (err_warn_bit_map & (u64) (1 << i)) {
833                                 err_warn = i;
834                                 break;
835                         }
836                 }
837                 BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);
838
839                 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
840                 index = xid % BNX2FC_TASKS_PER_PAGE;
841                 task_page = (struct fcoe_task_ctx_entry *)
842                              interface->hba->task_ctx[task_idx];
843                 task = &(task_page[index]);
844                 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
845                 if (!io_req)
846                         goto ret_warn_rqe;
847
848                 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
849                         printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
850                         goto ret_warn_rqe;
851                 }
852
853                 memcpy(&io_req->err_entry, err_entry,
854                        sizeof(struct fcoe_err_report_entry));
855
856                 if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
857                         /* REC_TOV is not a warning code */
858                         BUG_ON(1);
859                 else
860                         BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
861 ret_warn_rqe:
862                 bnx2fc_return_rqe(tgt, 1);
863                 spin_unlock_bh(&tgt->tgt_lock);
864                 break;
865
866         default:
867                 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
868                 break;
869         }
870 }
871
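/**
 * bnx2fc_process_cq_compl - process a single work-request completion CQE
 *
 * @tgt:        bnx2fc_rport the CQE belongs to
 * @wqe:        CQE; the FCOE_PEND_WQ_CQE_TASK_ID bits carry the completed xid
 *
 * Looks up the task context and bnx2fc_cmd for the xid and dispatches to
 * the per-command-type completion handler based on the RX state reported
 * by the firmware.
 */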
872 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
873 {
874         struct fcoe_task_ctx_entry *task;
875         struct fcoe_task_ctx_entry *task_page;
876         struct fcoe_port *port = tgt->port;
877         struct bnx2fc_interface *interface = port->priv;
878         struct bnx2fc_hba *hba = interface->hba;
879         struct bnx2fc_cmd *io_req;
880         int task_idx, index;
881         u16 xid;
882         u8  cmd_type;
883         u8 rx_state = 0;
884         u8 num_rq;
885
886         spin_lock_bh(&tgt->tgt_lock);
887         xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
888         if (xid >= hba->max_tasks) {
889                 printk(KERN_ERR PFX "ERROR:xid out of range\n");
890                 spin_unlock_bh(&tgt->tgt_lock);
891                 return;
892         }
893         task_idx = xid / BNX2FC_TASKS_PER_PAGE;
894         index = xid % BNX2FC_TASKS_PER_PAGE;
895         task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
896         task = &(task_page[index]);
897
898         num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
899                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
900                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
901
902         io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
903
904         if (io_req == NULL) {
905                 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
906                 spin_unlock_bh(&tgt->tgt_lock);
907                 return;
908         }
909
910         /* Timestamp IO completion time */
911         cmd_type = io_req->cmd_type;
912
913         rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
914                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
915                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
916
917         /* Process other IO completion types */
918         switch (cmd_type) {
919         case BNX2FC_SCSI_CMD:
920                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
921                         bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
922                         spin_unlock_bh(&tgt->tgt_lock);
923                         return;
924                 }
925
926                 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
927                         bnx2fc_process_abts_compl(io_req, task, num_rq);
928                 else if (rx_state ==
929                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
930                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
931                 else
932                         printk(KERN_ERR PFX "Invalid rx state - %d\n",
933                                 rx_state);
934                 break;
935
936         case BNX2FC_TASK_MGMT_CMD:
937                 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
938                 bnx2fc_process_tm_compl(io_req, task, num_rq);
939                 break;
940
941         case BNX2FC_ABTS:
942                 /*
943                  * ABTS request received by firmware. ABTS response
944                  * will be delivered to the task belonging to the IO
945                  * that was aborted
946                  */
947                 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
948                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
949                 break;
950
951         case BNX2FC_ELS:
952                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
953                         bnx2fc_process_els_compl(io_req, task, num_rq);
954                 else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
955                         bnx2fc_process_abts_compl(io_req, task, num_rq);
956                 else if (rx_state ==
957                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
958                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
959                 else
960                         printk(KERN_ERR PFX "Invalid rx state =  %d\n",
961                                 rx_state);
962                 break;
963
964         case BNX2FC_CLEANUP:
965                 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
966                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
967                 break;
968
969         case BNX2FC_SEQ_CLEANUP:
970                 BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
971                               io_req->xid);
972                 bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
973                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
974                 break;
975
976         default:
977                 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
978                 break;
979         }
980         spin_unlock_bh(&tgt->tgt_lock);
981 }
982
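/*
 * bnx2fc_arm_cq - re-arm the connection's CQ
 *
 * Writes the current CQ consumer index and toggle bit into the rx
 * doorbell structure and posts it to the doorbell window (ctx_base).
 */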
983 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
984 {
985         struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
986         u32 msg;
987
988         wmb();
989         rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
990                         FCOE_CQE_TOGGLE_BIT_SHIFT);
991         msg = *((u32 *)rx_db);
992         writel(cpu_to_le32(msg), tgt->ctx_base);
993         mmiowb();
994
995 }
996
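/*
 * bnx2fc_alloc_work - allocate a work item for deferred CQE processing
 *
 * Allocated with GFP_ATOMIC since this is called from the CQ processing
 * path; the caller queues the item on a per-CPU bnx2fc I/O thread.
 */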
997 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
998 {
999         struct bnx2fc_work *work;
1000         work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
1001         if (!work)
1002                 return NULL;
1003
1004         INIT_LIST_HEAD(&work->list);
1005         work->tgt = tgt;
1006         work->wqe = wqe;
1007         return work;
1008 }
1009
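/**
 * bnx2fc_process_new_cqes - drain newly arrived CQEs for a connection
 *
 * @tgt:        bnx2fc_rport whose CQ should be processed
 *
 * Walks the CQ while the toggle bit matches, dispatching unsolicited CQEs
 * inline and handing work-request completions to the per-CPU I/O threads
 * (or processing them inline if no thread is available), then re-arms the
 * CQ and credits the freed SQEs back to the connection.
 */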
1010 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1011 {
1012         struct fcoe_cqe *cq;
1013         u32 cq_cons;
1014         struct fcoe_cqe *cqe;
1015         u32 num_free_sqes = 0;
1016         u32 num_cqes = 0;
1017         u16 wqe;
1018
1019         /*
1020          * cq_lock is a low contention lock used to protect
1021          * the CQ data structure from being freed up during
1022          * the upload operation
1023          */
1024         spin_lock_bh(&tgt->cq_lock);
1025
1026         if (!tgt->cq) {
1027                 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
1028                 spin_unlock_bh(&tgt->cq_lock);
1029                 return 0;
1030         }
1031         cq = tgt->cq;
1032         cq_cons = tgt->cq_cons_idx;
1033         cqe = &cq[cq_cons];
1034
1035         while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
1036                (tgt->cq_curr_toggle_bit <<
1037                FCOE_CQE_TOGGLE_BIT_SHIFT)) {
1038
1039                 /* new entry on the cq */
1040                 if (wqe & FCOE_CQE_CQE_TYPE) {
1041                         /* Unsolicited event notification */
1042                         bnx2fc_process_unsol_compl(tgt, wqe);
1043                 } else {
1044                         /* Pending work request completion */
1045                         struct bnx2fc_work *work = NULL;
1046                         struct bnx2fc_percpu_s *fps = NULL;
1047                         unsigned int cpu = wqe % num_possible_cpus();
1048
1049                         fps = &per_cpu(bnx2fc_percpu, cpu);
1050                         spin_lock_bh(&fps->fp_work_lock);
1051                         if (unlikely(!fps->iothread))
1052                                 goto unlock;
1053
1054                         work = bnx2fc_alloc_work(tgt, wqe);
1055                         if (work)
1056                                 list_add_tail(&work->list,
1057                                               &fps->work_list);
1058 unlock:
1059                         spin_unlock_bh(&fps->fp_work_lock);
1060
1061                         /* Pending work request completion */
1062                         if (fps->iothread && work)
1063                                 wake_up_process(fps->iothread);
1064                         else
1065                                 bnx2fc_process_cq_compl(tgt, wqe);
1066                         num_free_sqes++;
1067                 }
1068                 cqe++;
1069                 tgt->cq_cons_idx++;
1070                 num_cqes++;
1071
1072                 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1073                         tgt->cq_cons_idx = 0;
1074                         cqe = cq;
1075                         tgt->cq_curr_toggle_bit =
1076                                 1 - tgt->cq_curr_toggle_bit;
1077                 }
1078         }
1079         if (num_cqes) {
1080                 /* Arm CQ only if doorbell is mapped */
1081                 if (tgt->ctx_base)
1082                         bnx2fc_arm_cq(tgt);
1083                 atomic_add(num_free_sqes, &tgt->free_sqes);
1084         }
1085         spin_unlock_bh(&tgt->cq_lock);
1086         return 0;
1087 }
1088
1089 /**
1090  * bnx2fc_fastpath_notification - process global event queue (KCQ)
1091  *
1092  * @hba:                adapter structure pointer
1093  * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
1094  *
1095  * Fast path event notification handler
1096  */
1097 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
1098                                         struct fcoe_kcqe *new_cqe_kcqe)
1099 {
1100         u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
1101         struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
1102
1103         if (!tgt) {
1104                 printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
1105                 return;
1106         }
1107
1108         bnx2fc_process_new_cqes(tgt);
1109 }
1110
1111 /**
1112  * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
1113  *
1114  * @hba:        adapter structure pointer
1115  * @ofld_kcqe:  connection offload kcqe pointer
1116  *
1117  * handle session offload completion; mark the session offloaded on
1118  * success so that the offload waiter can go on to enable it.
1119  */
1120 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1121                                         struct fcoe_kcqe *ofld_kcqe)
1122 {
1123         struct bnx2fc_rport             *tgt;
1124         struct fcoe_port                *port;
1125         struct bnx2fc_interface         *interface;
1126         u32                             conn_id;
1127         u32                             context_id;
1128
1129         conn_id = ofld_kcqe->fcoe_conn_id;
1130         context_id = ofld_kcqe->fcoe_conn_context_id;
1131         tgt = hba->tgt_ofld_list[conn_id];
1132         if (!tgt) {
1133                 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1134                 return;
1135         }
1136         BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1137                 ofld_kcqe->fcoe_conn_context_id);
1138         port = tgt->port;
1139         interface = tgt->port->priv;
1140         if (hba != interface->hba) {
1141                 printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1142                 goto ofld_cmpl_err;
1143         }
1144         /*
1145          * cnic has allocated a context_id for this session; use this
1146          * while enabling the session.
1147          */
1148         tgt->context_id = context_id;
1149         if (ofld_kcqe->completion_status) {
1150                 if (ofld_kcqe->completion_status ==
1151                                 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1152                         printk(KERN_ERR PFX "unable to allocate FCoE context "
1153                                 "resources\n");
1154                         set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1155                 }
1156         } else {
1157                 /* FW offload request successfully completed */
1158                 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1159         }
1160 ofld_cmpl_err:
1161         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1162         wake_up_interruptible(&tgt->ofld_wait);
1163 }
1164
1165 /**
1166  * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1167  *
1168  * @hba:        adapter structure pointer
1169  * @ofld_kcqe:  connection offload kcqe pointer
1170  *
1171  * handle session enable completion, mark the rport as ready
1172  */
1173
1174 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1175                                                 struct fcoe_kcqe *ofld_kcqe)
1176 {
1177         struct bnx2fc_rport             *tgt;
1178         struct bnx2fc_interface         *interface;
1179         u32                             conn_id;
1180         u32                             context_id;
1181
1182         context_id = ofld_kcqe->fcoe_conn_context_id;
1183         conn_id = ofld_kcqe->fcoe_conn_id;
1184         tgt = hba->tgt_ofld_list[conn_id];
1185         if (!tgt) {
1186                 printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1187                 return;
1188         }
1189
1190         BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1191                 ofld_kcqe->fcoe_conn_context_id);
1192
1193         /*
1194          * context_id should be the same for this target during offload
1195          * and enable
1196          */
1197         if (tgt->context_id != context_id) {
1198                 printk(KERN_ERR PFX "context id mis-match\n");
1199                 return;
1200         }
1201         interface = tgt->port->priv;
1202         if (hba != interface->hba) {
1203                 printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1204                 goto enbl_cmpl_err;
1205         }
1206         if (!ofld_kcqe->completion_status)
1207                 /* enable successful - rport ready for issuing IOs */
1208                 set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1209
1210 enbl_cmpl_err:
1211         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1212         wake_up_interruptible(&tgt->ofld_wait);
1213 }
1214
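/**
 * bnx2fc_process_conn_disable_cmpl - process session disable completion
 *
 * @hba:                adapter structure pointer
 * @disable_kcqe:       connection disable kcqe pointer
 *
 * Updates the rport flags according to the completion status and wakes
 * up the thread waiting on the upload (disable/destroy) sequence.
 */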
1215 static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1216                                         struct fcoe_kcqe *disable_kcqe)
1217 {
1218
1219         struct bnx2fc_rport             *tgt;
1220         u32                             conn_id;
1221
1222         conn_id = disable_kcqe->fcoe_conn_id;
1223         tgt = hba->tgt_ofld_list[conn_id];
1224         if (!tgt) {
1225                 printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
1226                 return;
1227         }
1228
1229         BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1230
1231         if (disable_kcqe->completion_status) {
1232                 printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
1233                         disable_kcqe->completion_status);
1234                 set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
1235                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1236                 wake_up_interruptible(&tgt->upld_wait);
1237         } else {
1238                 /* disable successful */
1239                 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1240                 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1241                 clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
1242                 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1243                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1244                 wake_up_interruptible(&tgt->upld_wait);
1245         }
1246 }
1247
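/**
 * bnx2fc_process_conn_destroy_cmpl - process session destroy completion
 *
 * @hba:                adapter structure pointer
 * @destroy_kcqe:       connection destroy kcqe pointer
 *
 * Marks the rport as destroyed and wakes up the upload waiter on success.
 */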
1248 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1249                                         struct fcoe_kcqe *destroy_kcqe)
1250 {
1251         struct bnx2fc_rport             *tgt;
1252         u32                             conn_id;
1253
1254         conn_id = destroy_kcqe->fcoe_conn_id;
1255         tgt = hba->tgt_ofld_list[conn_id];
1256         if (!tgt) {
1257                 printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
1258                 return;
1259         }
1260
1261         BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1262
1263         if (destroy_kcqe->completion_status) {
1264                 printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
1265                         destroy_kcqe->completion_status);
1266                 return;
1267         } else {
1268                 /* destroy successful */
1269                 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1270                 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1271                 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1272                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1273                 wake_up_interruptible(&tgt->upld_wait);
1274         }
1275 }
1276
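/*
 * bnx2fc_init_failure - log the reason the firmware INIT_FUNC request failed
 */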
1277 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1278 {
1279         switch (err_code) {
1280         case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1281                 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1282                 break;
1283
1284         case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1285                 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1286                 break;
1287
1288         case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1289                 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1290                 break;
1291         case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1292                 printk(KERN_ERR PFX "init failure due to compl status err\n");
1293                 break;
1294         case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1295                 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
1296                 break;
1297         default:
1298                 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1299         }
1300 }
1301
1302 /**
1303  * bnx2fc_indicate_kcqe - process KCQE
1304  *
1305  * @context:    adapter structure pointer (struct bnx2fc_hba *)
1306  * @kcq:        array of kcqe pointers
1307  * @num_cqe:    Number of completion queue elements
1308  *
1309  * Generic KCQ event handler
1310  */
1311 void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1312                                         u32 num_cqe)
1313 {
1314         struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1315         int i = 0;
1316         struct fcoe_kcqe *kcqe = NULL;
1317
1318         while (i < num_cqe) {
1319                 kcqe = (struct fcoe_kcqe *) kcq[i++];
1320
1321                 switch (kcqe->op_code) {
1322                 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1323                         bnx2fc_fastpath_notification(hba, kcqe);
1324                         break;
1325
1326                 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1327                         bnx2fc_process_ofld_cmpl(hba, kcqe);
1328                         break;
1329
1330                 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1331                         bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1332                         break;
1333
1334                 case FCOE_KCQE_OPCODE_INIT_FUNC:
1335                         if (kcqe->completion_status !=
1336                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1337                                 bnx2fc_init_failure(hba,
1338                                                 kcqe->completion_status);
1339                         } else {
1340                                 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1341                                 bnx2fc_get_link_state(hba);
1342                                 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1343                                         (u8)hba->pcidev->bus->number);
1344                         }
1345                         break;
1346
1347                 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1348                         if (kcqe->completion_status !=
1349                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1350
1351                                 printk(KERN_ERR PFX "DESTROY failed\n");
1352                         } else {
1353                                 printk(KERN_INFO PFX "DESTROY success\n");
1354                         }
1355                         set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1356                         wake_up_interruptible(&hba->destroy_wait);
1357                         break;
1358
1359                 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1360                         bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1361                         break;
1362
1363                 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1364                         bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1365                         break;
1366
1367                 case FCOE_KCQE_OPCODE_STAT_FUNC:
1368                         if (kcqe->completion_status !=
1369                             FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1370                                 printk(KERN_ERR PFX "STAT failed\n");
1371                         complete(&hba->stat_req_done);
1372                         break;
1373
1374                 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1375                         /* fall thru */
1376                 default:
1377                         printk(KERN_ERR PFX "unknown opcode 0x%x\n",
1378                                                                 kcqe->op_code);
1379                 }
1380         }
1381 }
1382
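/**
 * bnx2fc_add_2_sq - post a work request entry on the session SQ
 *
 * @tgt:        rport structure pointer
 * @xid:        task id of the work request
 *
 * Encode the xid and the current toggle bit into the next SQ WQE and
 * advance the producer index, flipping the toggle bit on wrap-around.
 */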
1383 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1384 {
1385         struct fcoe_sqe *sqe;
1386
1387         sqe = &tgt->sq[tgt->sq_prod_idx];
1388
1389         /* Fill SQ WQE */
1390         sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1391         sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1392
1393         /* Advance SQ Prod Idx */
1394         if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1395                 tgt->sq_prod_idx = 0;
1396                 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1397         }
1398 }
1399
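/**
 * bnx2fc_ring_doorbell - notify the chip of the new SQ producer index
 *
 * @tgt:        rport structure pointer
 *
 * Build the doorbell message from the SQ producer index and toggle bit
 * and write it to the doorbell register mapped at tgt->ctx_base.
 */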
1400 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1401 {
1402         struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1403         u32 msg;
1404
1405         wmb();
1406         sq_db->prod = tgt->sq_prod_idx |
1407                                 (tgt->sq_curr_toggle_bit << 15);
1408         msg = *((u32 *)sq_db);
1409         writel(cpu_to_le32(msg), tgt->ctx_base);
1410         mmiowb();
1411
1412 }
1413
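/**
 * bnx2fc_map_doorbell - map the per-connection doorbell register
 *
 * @tgt:        rport structure pointer
 *
 * Compute the doorbell offset for this connection from its context id
 * and ioremap 4 bytes of the doorbell BAR into tgt->ctx_base.
 */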
1414 int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1415 {
1416         u32 context_id = tgt->context_id;
1417         struct fcoe_port *port = tgt->port;
1418         u32 reg_off;
1419         resource_size_t reg_base;
1420         struct bnx2fc_interface *interface = port->priv;
1421         struct bnx2fc_hba *hba = interface->hba;
1422
1423         reg_base = pci_resource_start(hba->pcidev,
1424                                         BNX2X_DOORBELL_PCI_BAR);
1425         reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
1426         tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1427         if (!tgt->ctx_base)
1428                 return -ENOMEM;
1429         return 0;
1430 }
1431
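/**
 * bnx2fc_get_next_rqe - get a pointer to the next RQ entries
 *
 * @tgt:        rport structure pointer
 * @num_items:  number of RQ entries to consume
 *
 * Return the buffer at the current RQ consumer index and advance the
 * index, or NULL if the request would run past the end of the ring.
 */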
1432 char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1433 {
1434         char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1435
1436         if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1437                 return NULL;
1438
1439         tgt->rq_cons_idx += num_items;
1440
1441         if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1442                 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1443
1444         return buf;
1445 }
1446
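/**
 * bnx2fc_return_rqe - return RQ entries to the firmware
 *
 * @tgt:        rport structure pointer
 * @num_items:  number of RQ entries to return
 *
 * Advance the RQ producer index, handling the wrap-around, and post the
 * new value to the connection doorbell.
 */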
1447 void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1448 {
1449         /* return the rq buffer */
1450         u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1451         if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1452                 /* Wrap around RQ */
1453                 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1454         }
1455         tgt->rq_prod_idx = next_prod_idx;
1456         tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1457 }
1458
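/**
 * bnx2fc_init_seq_cleanup_task - initialize a sequence cleanup task context
 *
 * @seq_clnp_req: sequence cleanup request
 * @task:         task context entry to initialize
 * @orig_io_req:  original I/O request being cleaned up
 * @offset:       relative offset into the original transfer
 *
 * Locate the BD entry that covers @offset in the original I/O and set up
 * the SGL and expected receive offsets for the cleanup task.
 */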
1459 void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
1460                                   struct fcoe_task_ctx_entry *task,
1461                                   struct bnx2fc_cmd *orig_io_req,
1462                                   u32 offset)
1463 {
1464         struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
1465         struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
1466         struct bnx2fc_interface *interface = tgt->port->priv;
1467         struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
1468         struct fcoe_task_ctx_entry *orig_task;
1469         struct fcoe_task_ctx_entry *task_page;
1470         struct fcoe_ext_mul_sges_ctx *sgl;
1471         u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
1472         u8 orig_task_type;
1473         u16 orig_xid = orig_io_req->xid;
1474         u32 context_id = tgt->context_id;
1475         u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
1476         u32 orig_offset = offset;
1477         int bd_count;
1478         int orig_task_idx, index;
1479         int i;
1480
1481         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1482
1483         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1484                 orig_task_type = FCOE_TASK_TYPE_WRITE;
1485         else
1486                 orig_task_type = FCOE_TASK_TYPE_READ;
1487
1488         /* Tx flags */
1489         task->txwr_rxrd.const_ctx.tx_flags =
1490                                 FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
1491                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1492         /* init flags */
1493         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1494                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1495         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1496                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1497         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1498                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1501
1502         task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1503
1504         task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
1505         task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
1506
1507         bd_count = orig_io_req->bd_tbl->bd_valid;
1508
1509         /* obtain the appropriate bd entry from relative offset */
1510         for (i = 0; i < bd_count; i++) {
1511                 if (offset < bd[i].buf_len)
1512                         break;
1513                 offset -= bd[i].buf_len;
1514         }
1515         phys_addr += (i * sizeof(struct fcoe_bd_ctx));
1516
1517         if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
1518                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1519                                 (u32)phys_addr;
1520                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1521                                 (u32)((u64)phys_addr >> 32);
1522                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1523                                 bd_count;
1524                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
1525                                 offset; /* adjusted offset */
1526                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
1527         } else {
1528                 orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
1529                 index = orig_xid % BNX2FC_TASKS_PER_PAGE;
1530
1531                 task_page = (struct fcoe_task_ctx_entry *)
1532                              interface->hba->task_ctx[orig_task_idx];
1533                 orig_task = &(task_page[index]);
1534
1535                 /* Multiple SGEs were used for this IO */
1536                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1537                 sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
1538                 sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
1539                 sgl->mul_sgl.sgl_size = bd_count;
1540                 sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
1541                 sgl->mul_sgl.cur_sge_idx = i;
1542
1543                 memset(&task->rxwr_only.rx_seq_ctx, 0,
1544                        sizeof(struct fcoe_rx_seq_ctx));
1545                 task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
1546                 task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
1547         }
1548 }
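
/**
 * bnx2fc_init_cleanup_task - initialize an exchange cleanup task context
 *
 * @io_req:     cleanup request
 * @task:       task context entry to initialize
 * @orig_xid:   xid of the task being cleaned up
 */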
1549 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1550                               struct fcoe_task_ctx_entry *task,
1551                               u16 orig_xid)
1552 {
1553         u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1554         struct bnx2fc_rport *tgt = io_req->tgt;
1555         u32 context_id = tgt->context_id;
1556
1557         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1558
1559         /* Tx Write Rx Read */
1560         /* init flags */
1561         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1562                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1563         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1564                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1565         if (tgt->dev_type == TYPE_TAPE)
1566                 task->txwr_rxrd.const_ctx.init_flags |=
1567                                 FCOE_TASK_DEV_TYPE_TAPE <<
1568                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1569         else
1570                 task->txwr_rxrd.const_ctx.init_flags |=
1571                                 FCOE_TASK_DEV_TYPE_DISK <<
1572                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1573         task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1574
1575         /* Tx flags */
1576         task->txwr_rxrd.const_ctx.tx_flags =
1577                                 FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1578                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1579
1580         /* Rx Read Tx Write */
1581         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1582                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1583         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1584                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1585 }
1586
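/**
 * bnx2fc_init_mp_task - initialize a middle path task context
 *
 * @io_req:     task management or ELS request
 * @task:       task context entry to initialize
 *
 * Program the request and response SGLs, device type and class of
 * service, and copy the FC header into the task context.
 */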
1587 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1588                                 struct fcoe_task_ctx_entry *task)
1589 {
1590         struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1591         struct bnx2fc_rport *tgt = io_req->tgt;
1592         struct fc_frame_header *fc_hdr;
1593         struct fcoe_ext_mul_sges_ctx *sgl;
1594         u8 task_type = 0;
1595         u64 *hdr;
1596         u64 temp_hdr[3];
1597         u32 context_id;
1598
1600         /* Obtain task_type */
1601         if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1602             (io_req->cmd_type == BNX2FC_ELS)) {
1603                 task_type = FCOE_TASK_TYPE_MIDPATH;
1604         } else if (io_req->cmd_type == BNX2FC_ABTS) {
1605                 task_type = FCOE_TASK_TYPE_ABTS;
1606         }
1607
1608         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1609
1610         /* Setup the task from io_req for easy reference */
1611         io_req->task = task;
1612
1613         BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1614                 io_req->cmd_type, task_type);
1615
1616         /* Tx only */
1617         if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1618             (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1619                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1620                                 (u32)mp_req->mp_req_bd_dma;
1621                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1622                                 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1623                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1624         }
1625
1626         /* Tx Write Rx Read */
1627         /* init flags */
1628         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1629                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1630         if (tgt->dev_type == TYPE_TAPE)
1631                 task->txwr_rxrd.const_ctx.init_flags |=
1632                                 FCOE_TASK_DEV_TYPE_TAPE <<
1633                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1634         else
1635                 task->txwr_rxrd.const_ctx.init_flags |=
1636                                 FCOE_TASK_DEV_TYPE_DISK <<
1637                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1638         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1639                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1640
1641         /* tx flags */
1642         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1643                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1644
1645         /* Rx Write Tx Read */
1646         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1647
1648         /* rx flags */
1649         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1650                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1651
1652         context_id = tgt->context_id;
1653         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1654                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1655
1656         fc_hdr = &(mp_req->req_fc_hdr);
1657         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1658                 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1659                 fc_hdr->fh_rx_id = htons(0xffff);
1660                 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1661         } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1662                 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1663         }
1664
1665         /* Fill FC Header into middle path buffer */
1666         hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1667         memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1668         hdr[0] = cpu_to_be64(temp_hdr[0]);
1669         hdr[1] = cpu_to_be64(temp_hdr[1]);
1670         hdr[2] = cpu_to_be64(temp_hdr[2]);
1671
1672         /* Rx Only */
1673         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1674                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1675
1676                 sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1677                 sgl->mul_sgl.cur_sge_addr.hi =
1678                                 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1679                 sgl->mul_sgl.sgl_size = 1;
1680         }
1681 }
1682
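/**
 * bnx2fc_init_task - initialize a SCSI read/write task context
 *
 * @io_req:     SCSI command request
 * @task:       task context entry to initialize
 *
 * Build the FCP_CMND IU in the task context and program either cached
 * SGEs (small disk I/Os) or a multiple-SGE SGL for the data transfer.
 */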
1683 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1684                              struct fcoe_task_ctx_entry *task)
1685 {
1686         u8 task_type;
1687         struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1688         struct io_bdt *bd_tbl = io_req->bd_tbl;
1689         struct bnx2fc_rport *tgt = io_req->tgt;
1690         struct fcoe_cached_sge_ctx *cached_sge;
1691         struct fcoe_ext_mul_sges_ctx *sgl;
1692         int dev_type = tgt->dev_type;
1693         u64 *fcp_cmnd;
1694         u64 tmp_fcp_cmnd[4];
1695         u32 context_id;
1696         int cnt, i;
1697         int bd_count;
1698
1699         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1700
1701         /* Setup the task from io_req for easy reference */
1702         io_req->task = task;
1703
1704         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1705                 task_type = FCOE_TASK_TYPE_WRITE;
1706         else
1707                 task_type = FCOE_TASK_TYPE_READ;
1708
1709         /* Tx only */
1710         bd_count = bd_tbl->bd_valid;
1711         cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1712         if (task_type == FCOE_TASK_TYPE_WRITE) {
1713                 if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
1714                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1715
1716                         task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
1717                         cached_sge->cur_buf_addr.lo =
1718                                         fcoe_bd_tbl->buf_addr_lo;
1719                         task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
1720                         cached_sge->cur_buf_addr.hi =
1721                                         fcoe_bd_tbl->buf_addr_hi;
1722                         task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
1723                         cached_sge->cur_buf_rem =
1724                                         fcoe_bd_tbl->buf_len;
1725
1726                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1727                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1728                 } else {
1729                         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1730                                         (u32)bd_tbl->bd_tbl_dma;
1731                         task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1732                                         (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1733                         task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1734                                         bd_tbl->bd_valid;
1735                 }
1736         }
1737
1738         /* Tx Write Rx Read */
1739         /* Init state to NORMAL */
1740         task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1741                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1742         if (dev_type == TYPE_TAPE) {
1743                 task->txwr_rxrd.const_ctx.init_flags |=
1744                                 FCOE_TASK_DEV_TYPE_TAPE <<
1745                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746                 io_req->rec_retry = 0;
1748         } else
1749                 task->txwr_rxrd.const_ctx.init_flags |=
1750                                 FCOE_TASK_DEV_TYPE_DISK <<
1751                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1752         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1753                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1754         /* tx flags */
1755         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1756                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1757
1758         /* Set initial seq counter */
1759         task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1760
1761         /* Fill FCP_CMND IU */
1762         fcp_cmnd = (u64 *)
1763                     task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1764         bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1765
1766         /* swap fcp_cmnd */
1767         cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1768
1769         for (i = 0; i < cnt; i++) {
1770                 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1771                 fcp_cmnd++;
1772         }
1773
1774         /* Rx Write Tx Read */
1775         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1776
1777         context_id = tgt->context_id;
1778         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1779                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1780
1781         /* rx flags */
1782         /* Set state to "waiting for the first packet" */
1783         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1784                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1785
1786         task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1787
1788         /* Rx Only */
1789         if (task_type != FCOE_TASK_TYPE_READ)
1790                 return;
1791
1792         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1793         bd_count = bd_tbl->bd_valid;
1794
1795         if (dev_type == TYPE_DISK) {
1796                 if (bd_count == 1) {
1797
1798                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1799
1800                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1801                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1802                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1803                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1804                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1805                 } else if (bd_count == 2) {
1806                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1807
1808                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1809                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1810                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1811
1812                         fcoe_bd_tbl++;
1813                         cached_sge->second_buf_addr.lo =
1814                                                  fcoe_bd_tbl->buf_addr_lo;
1815                         cached_sge->second_buf_addr.hi =
1816                                                 fcoe_bd_tbl->buf_addr_hi;
1817                         cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1818                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1819                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1820                 } else {
1821
1822                         sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1823                         sgl->mul_sgl.cur_sge_addr.hi =
1824                                         (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1825                         sgl->mul_sgl.sgl_size = bd_count;
1826                 }
1827         } else {
1828                 sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1829                 sgl->mul_sgl.cur_sge_addr.hi =
1830                                 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1831                 sgl->mul_sgl.sgl_size = bd_count;
1832         }
1833 }
1834
1835 /**
1836  * bnx2fc_setup_task_ctx - allocate and map task context
1837  *
1838  * @hba:        pointer to adapter structure
1839  *
1840  * allocate memory for task context, and associated BD table to be used
1841  * by firmware
1842  *
1843  */
1844 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1845 {
1846         int rc = 0;
1847         struct regpair *task_ctx_bdt;
1848         dma_addr_t addr;
1849         int task_ctx_arr_sz;
1850         int i;
1851
1852         /*
1853          * Allocate the task context BD table. One page of the BD table
1854          * can map 256 task context pages, and each page holds 32 task
1855          * context entries, so a single BD table page covers up to 8192
1856          * task context entries.
1857          */
1858         hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1859                                                   PAGE_SIZE,
1860                                                   &hba->task_ctx_bd_dma,
1861                                                   GFP_KERNEL);
1862         if (!hba->task_ctx_bd_tbl) {
1863                 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1864                 rc = -ENOMEM;
1865                 goto out;
1866         }
1867         memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1868
1869         /*
1870          * Allocate task_ctx, an array of pointers, each pointing to
1871          * a page containing 32 task contexts
1872          */
1873         task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1874         hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
1875                                  GFP_KERNEL);
1876         if (!hba->task_ctx) {
1877                 printk(KERN_ERR PFX "unable to allocate task context array\n");
1878                 rc = -ENOMEM;
1879                 goto out1;
1880         }
1881
1882         /*
1883          * Allocate task_ctx_dma which is an array of dma addresses
1884          */
1885         hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
1886                                         sizeof(dma_addr_t)), GFP_KERNEL);
1887         if (!hba->task_ctx_dma) {
1888                 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1889                 rc = -ENOMEM;
1890                 goto out2;
1891         }
1892
1893         task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1894         for (i = 0; i < task_ctx_arr_sz; i++) {
1895
1896                 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1897                                                       PAGE_SIZE,
1898                                                       &hba->task_ctx_dma[i],
1899                                                       GFP_KERNEL);
1900                 if (!hba->task_ctx[i]) {
1901                         printk(KERN_ERR PFX "unable to alloc task context\n");
1902                         rc = -ENOMEM;
1903                         goto out3;
1904                 }
1905                 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1906                 addr = (u64)hba->task_ctx_dma[i];
1907                 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1908                 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1909                 task_ctx_bdt++;
1910         }
1911         return 0;
1912
1913 out3:
1914         for (i = 0; i < task_ctx_arr_sz; i++) {
1915                 if (hba->task_ctx[i]) {
1916
1917                         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1918                                 hba->task_ctx[i], hba->task_ctx_dma[i]);
1919                         hba->task_ctx[i] = NULL;
1920                 }
1921         }
1922
1923         kfree(hba->task_ctx_dma);
1924         hba->task_ctx_dma = NULL;
1925 out2:
1926         kfree(hba->task_ctx);
1927         hba->task_ctx = NULL;
1928 out1:
1929         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1930                         hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1931         hba->task_ctx_bd_tbl = NULL;
1932 out:
1933         return rc;
1934 }
1935
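/**
 * bnx2fc_free_task_ctx - free task context memory
 *
 * @hba:        pointer to adapter structure
 *
 * Free the task context BD table and all task context pages allocated
 * by bnx2fc_setup_task_ctx().
 */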
1936 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1937 {
1938         int task_ctx_arr_sz;
1939         int i;
1940
1941         if (hba->task_ctx_bd_tbl) {
1942                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1943                                     hba->task_ctx_bd_tbl,
1944                                     hba->task_ctx_bd_dma);
1945                 hba->task_ctx_bd_tbl = NULL;
1946         }
1947
1948         task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
1949         if (hba->task_ctx) {
1950                 for (i = 0; i < task_ctx_arr_sz; i++) {
1951                         if (hba->task_ctx[i]) {
1952                                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1953                                                     hba->task_ctx[i],
1954                                                     hba->task_ctx_dma[i]);
1955                                 hba->task_ctx[i] = NULL;
1956                         }
1957                 }
1958                 kfree(hba->task_ctx);
1959                 hba->task_ctx = NULL;
1960         }
1961
1962         kfree(hba->task_ctx_dma);
1963         hba->task_ctx_dma = NULL;
1964 }
1965
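/**
 * bnx2fc_free_hash_table - free the session hash table
 *
 * @hba:        pointer to adapter structure
 *
 * Walk the hash table PBL to recover the DMA address of each segment,
 * free the segments and then free the PBL page itself.
 */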
1966 static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1967 {
1968         int i;
1969         int segment_count;
1970         u32 *pbl;
1971
1972         if (hba->hash_tbl_segments) {
1973
1974                 pbl = hba->hash_tbl_pbl;
1975                 if (pbl) {
1976                         segment_count = hba->hash_tbl_segment_count;
1977                         for (i = 0; i < segment_count; ++i) {
1978                                 dma_addr_t dma_address;
1979
1980                                 dma_address = le32_to_cpu(*pbl);
1981                                 ++pbl;
1982                                 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1983                                 ++pbl;
1984                                 dma_free_coherent(&hba->pcidev->dev,
1985                                                   BNX2FC_HASH_TBL_CHUNK_SIZE,
1986                                                   hba->hash_tbl_segments[i],
1987                                                   dma_address);
1988                         }
1989                 }
1990
1991                 kfree(hba->hash_tbl_segments);
1992                 hba->hash_tbl_segments = NULL;
1993         }
1994
1995         if (hba->hash_tbl_pbl) {
1996                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1997                                     hba->hash_tbl_pbl,
1998                                     hba->hash_tbl_pbl_dma);
1999                 hba->hash_tbl_pbl = NULL;
2000         }
2001 }
2002
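/**
 * bnx2fc_allocate_hash_table - allocate the session hash table
 *
 * @hba:        pointer to adapter structure
 *
 * Allocate the hash table in BNX2FC_HASH_TBL_CHUNK_SIZE segments and
 * record the DMA address of each segment in the hash table PBL.
 */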
2003 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2004 {
2005         int i;
2006         int hash_table_size;
2007         int segment_count;
2008         int segment_array_size;
2009         int dma_segment_array_size;
2010         dma_addr_t *dma_segment_array;
2011         u32 *pbl;
2012
2013         hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
2014                 sizeof(struct fcoe_hash_table_entry);
2015
2016         segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
2017         segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
2018         hba->hash_tbl_segment_count = segment_count;
2019
2020         segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
2021         hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
2022         if (!hba->hash_tbl_segments) {
2023                 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
2024                 return -ENOMEM;
2025         }
2026         dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
2027         dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
2028         if (!dma_segment_array) {
2029                 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
2030                 goto cleanup_ht;
2031         }
2032
2033         for (i = 0; i < segment_count; ++i) {
2034                 hba->hash_tbl_segments[i] =
2035                         dma_alloc_coherent(&hba->pcidev->dev,
2036                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
2037                                            &dma_segment_array[i],
2038                                            GFP_KERNEL);
2039                 if (!hba->hash_tbl_segments[i]) {
2040                         printk(KERN_ERR PFX "hash segment alloc failed\n");
2041                         goto cleanup_dma;
2042                 }
2043                 memset(hba->hash_tbl_segments[i], 0,
2044                        BNX2FC_HASH_TBL_CHUNK_SIZE);
2045         }
2046
2047         hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
2048                                                PAGE_SIZE,
2049                                                &hba->hash_tbl_pbl_dma,
2050                                                GFP_KERNEL);
2051         if (!hba->hash_tbl_pbl) {
2052                 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2053                 goto cleanup_dma;
2054         }
2055         memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
2056
2057         pbl = hba->hash_tbl_pbl;
2058         for (i = 0; i < segment_count; ++i) {
2059                 u64 paddr = dma_segment_array[i];
2060                 *pbl = cpu_to_le32((u32) paddr);
2061                 ++pbl;
2062                 *pbl = cpu_to_le32((u32) (paddr >> 32));
2063                 ++pbl;
2064         }
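        /*
         * Re-walk the PBL entries just written; this pass only reads the
         * address pairs back and has no side effects.
         */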
2065         pbl = hba->hash_tbl_pbl;
2066         i = 0;
2067         while (*pbl && *(pbl + 1)) {
2068                 u32 lo;
2069                 u32 hi;
2070                 lo = *pbl;
2071                 ++pbl;
2072                 hi = *pbl;
2073                 ++pbl;
2074                 ++i;
2075         }
2076         kfree(dma_segment_array);
2077         return 0;
2078
2079 cleanup_dma:
2080         for (i = 0; i < segment_count; ++i) {
2081                 if (hba->hash_tbl_segments[i])
2082                         dma_free_coherent(&hba->pcidev->dev,
2083                                             BNX2FC_HASH_TBL_CHUNK_SIZE,
2084                                             hba->hash_tbl_segments[i],
2085                                             dma_segment_array[i]);
2086         }
2087
2088         kfree(dma_segment_array);
2089
2090 cleanup_ht:
2091         kfree(hba->hash_tbl_segments);
2092         hba->hash_tbl_segments = NULL;
2093         return -ENOMEM;
2094 }
2095
2096 /**
2097  * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
2098  *
2099  * @hba:        Pointer to adapter structure
2100  *
2101  */
2102 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2103 {
2104         u64 addr;
2105         u32 mem_size;
2106         int i;
2107
2108         if (bnx2fc_allocate_hash_table(hba))
2109                 return -ENOMEM;
2110
2111         mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2112         hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2113                                                   &hba->t2_hash_tbl_ptr_dma,
2114                                                   GFP_KERNEL);
2115         if (!hba->t2_hash_tbl_ptr) {
2116                 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2117                 bnx2fc_free_fw_resc(hba);
2118                 return -ENOMEM;
2119         }
2120         memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
2121
2122         mem_size = BNX2FC_NUM_MAX_SESS *
2123                                 sizeof(struct fcoe_t2_hash_table_entry);
2124         hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2125                                               &hba->t2_hash_tbl_dma,
2126                                               GFP_KERNEL);
2127         if (!hba->t2_hash_tbl) {
2128                 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2129                 bnx2fc_free_fw_resc(hba);
2130                 return -ENOMEM;
2131         }
2132         memset(hba->t2_hash_tbl, 0x00, mem_size);
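        /* Chain each t2 hash table entry to the DMA address of the next one */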
2133         for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
2134                 addr = (u64) hba->t2_hash_tbl_dma +
2135                          ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
2136                 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
2137                 hba->t2_hash_tbl[i].next.hi = addr >> 32;
2138         }
2139
2140         hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2141                                                PAGE_SIZE, &hba->dummy_buf_dma,
2142                                                GFP_KERNEL);
2143         if (!hba->dummy_buffer) {
2144                 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
2145                 bnx2fc_free_fw_resc(hba);
2146                 return -ENOMEM;
2147         }
2148
2149         hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
2150                                                PAGE_SIZE,
2151                                                &hba->stats_buf_dma,
2152                                                GFP_KERNEL);
2153         if (!hba->stats_buffer) {
2154                 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2155                 bnx2fc_free_fw_resc(hba);
2156                 return -ENOMEM;
2157         }
2158         memset(hba->stats_buffer, 0x00, PAGE_SIZE);
2159
2160         return 0;
2161 }
2162
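/**
 * bnx2fc_free_fw_resc - free hash tables, dummy buffer and stats buffer
 *
 * @hba:        pointer to adapter structure
 */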
2163 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
2164 {
2165         u32 mem_size;
2166
2167         if (hba->stats_buffer) {
2168                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2169                                   hba->stats_buffer, hba->stats_buf_dma);
2170                 hba->stats_buffer = NULL;
2171         }
2172
2173         if (hba->dummy_buffer) {
2174                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
2175                                   hba->dummy_buffer, hba->dummy_buf_dma);
2176                 hba->dummy_buffer = NULL;
2177         }
2178
2179         if (hba->t2_hash_tbl_ptr) {
2180                 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2181                 dma_free_coherent(&hba->pcidev->dev, mem_size,
2182                                     hba->t2_hash_tbl_ptr,
2183                                     hba->t2_hash_tbl_ptr_dma);
2184                 hba->t2_hash_tbl_ptr = NULL;
2185         }
2186
2187         if (hba->t2_hash_tbl) {
2188                 mem_size = BNX2FC_NUM_MAX_SESS *
2189                             sizeof(struct fcoe_t2_hash_table_entry);
2190                 dma_free_coherent(&hba->pcidev->dev, mem_size,
2191                                     hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
2192                 hba->t2_hash_tbl = NULL;
2193         }
2194         bnx2fc_free_hash_table(hba);
2195 }