Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / scsi / bfa / bfa_svc.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 #include "bfad_drv.h"
19 #include "bfad_im.h"
20 #include "bfa_plog.h"
21 #include "bfa_cs.h"
22 #include "bfa_modules.h"
23
24 BFA_TRC_FILE(HAL, FCXP);
25 BFA_MODULE(fcdiag);
26 BFA_MODULE(fcxp);
27 BFA_MODULE(sgpg);
28 BFA_MODULE(lps);
29 BFA_MODULE(fcport);
30 BFA_MODULE(rport);
31 BFA_MODULE(uf);
32
33 /*
34  * LPS related definitions
35  */
36 #define BFA_LPS_MIN_LPORTS      (1)
37 #define BFA_LPS_MAX_LPORTS      (256)
38
39 /*
40  * Maximum Vports supported per physical port or vf.
41  */
42 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
43 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
44
45
46 /*
47  * FC PORT related definitions
48  */
49 /*
50  * The port is considered disabled if corresponding physical port or IOC are
51  * disabled explicitly
52  */
53 #define BFA_PORT_IS_DISABLED(bfa) \
54         ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
55         (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
56
57 /*
58  * BFA port state machine events
59  */
60 enum bfa_fcport_sm_event {
61         BFA_FCPORT_SM_START     = 1,    /*  start port state machine    */
62         BFA_FCPORT_SM_STOP      = 2,    /*  stop port state machine     */
63         BFA_FCPORT_SM_ENABLE    = 3,    /*  enable port         */
64         BFA_FCPORT_SM_DISABLE   = 4,    /*  disable port state machine */
65         BFA_FCPORT_SM_FWRSP     = 5,    /*  firmware enable/disable rsp */
66         BFA_FCPORT_SM_LINKUP    = 6,    /*  firmware linkup event       */
67         BFA_FCPORT_SM_LINKDOWN  = 7,    /*  firmware linkup down        */
68         BFA_FCPORT_SM_QRESUME   = 8,    /*  CQ space available  */
69         BFA_FCPORT_SM_HWFAIL    = 9,    /*  IOC h/w failure             */
70         BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
71         BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
72         BFA_FCPORT_SM_FAA_MISCONFIG = 12,       /* FAA misconfiguratin */
73         BFA_FCPORT_SM_DDPORTENABLE  = 13,       /* enable ddport        */
74         BFA_FCPORT_SM_DDPORTDISABLE = 14,       /* disable ddport       */
75 };
76
77 /*
78  * BFA port link notification state machine events
79  */
80
81 enum bfa_fcport_ln_sm_event {
82         BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event        */
83         BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event      */
84         BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification   */
85 };
86
87 /*
88  * RPORT related definitions
89  */
90 #define bfa_rport_offline_cb(__rp) do {                                 \
91         if ((__rp)->bfa->fcs)                                           \
92                 bfa_cb_rport_offline((__rp)->rport_drv);      \
93         else {                                                          \
94                 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
95                                 __bfa_cb_rport_offline, (__rp));      \
96         }                                                               \
97 } while (0)
98
99 #define bfa_rport_online_cb(__rp) do {                                  \
100         if ((__rp)->bfa->fcs)                                           \
101                 bfa_cb_rport_online((__rp)->rport_drv);      \
102         else {                                                          \
103                 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,              \
104                                   __bfa_cb_rport_online, (__rp));      \
105                 }                                                       \
106 } while (0)
107
108 /*
109  * forward declarations FCXP related functions
110  */
111 static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
112 static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
113                                 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
114 static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
115                                 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
116 static void     bfa_fcxp_qresume(void *cbarg);
117 static void     bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
118                                 struct bfi_fcxp_send_req_s *send_req);
119
120 /*
121  * forward declarations for LPS functions
122  */
123 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
124                 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
125 static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
126                                 struct bfa_iocfc_cfg_s *cfg,
127                                 struct bfa_pcidev_s *pcidev);
128 static void bfa_lps_detach(struct bfa_s *bfa);
129 static void bfa_lps_start(struct bfa_s *bfa);
130 static void bfa_lps_stop(struct bfa_s *bfa);
131 static void bfa_lps_iocdisable(struct bfa_s *bfa);
132 static void bfa_lps_login_rsp(struct bfa_s *bfa,
133                                 struct bfi_lps_login_rsp_s *rsp);
134 static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
135 static void bfa_lps_logout_rsp(struct bfa_s *bfa,
136                                 struct bfi_lps_logout_rsp_s *rsp);
137 static void bfa_lps_reqq_resume(void *lps_arg);
138 static void bfa_lps_free(struct bfa_lps_s *lps);
139 static void bfa_lps_send_login(struct bfa_lps_s *lps);
140 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
141 static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
142 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
143 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
144 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
145
146 /*
147  * forward declaration for LPS state machine
148  */
149 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
150 static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
151 static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
152                                         event);
153 static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
154 static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
155                                         enum bfa_lps_event event);
156 static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
157 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
158                                         event);
159
160 /*
161  * forward declaration for FC Port functions
162  */
163 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
164 static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
165 static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
166 static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
167 static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
168 static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
169 static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
170                         enum bfa_port_linkstate event, bfa_boolean_t trunk);
171 static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
172                                 enum bfa_port_linkstate event);
173 static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
174 static void bfa_fcport_stats_get_timeout(void *cbarg);
175 static void bfa_fcport_stats_clr_timeout(void *cbarg);
176 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
177
178 /*
179  * forward declaration for FC PORT state machine
180  */
181 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
182                                         enum bfa_fcport_sm_event event);
183 static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
184                                         enum bfa_fcport_sm_event event);
185 static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
186                                         enum bfa_fcport_sm_event event);
187 static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
188                                         enum bfa_fcport_sm_event event);
189 static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
190                                         enum bfa_fcport_sm_event event);
191 static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
192                                         enum bfa_fcport_sm_event event);
193 static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
194                                         enum bfa_fcport_sm_event event);
195 static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
196                                         enum bfa_fcport_sm_event event);
197 static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
198                                         enum bfa_fcport_sm_event event);
199 static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
200                                         enum bfa_fcport_sm_event event);
201 static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
202                                         enum bfa_fcport_sm_event event);
203 static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
204                                         enum bfa_fcport_sm_event event);
205 static void     bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
206                                         enum bfa_fcport_sm_event event);
207 static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
208                                         enum bfa_fcport_sm_event event);
209 static void     bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
210                                         enum bfa_fcport_sm_event event);
211
212 static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
213                                         enum bfa_fcport_ln_sm_event event);
214 static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
215                                         enum bfa_fcport_ln_sm_event event);
216 static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
217                                         enum bfa_fcport_ln_sm_event event);
218 static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
219                                         enum bfa_fcport_ln_sm_event event);
220 static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
221                                         enum bfa_fcport_ln_sm_event event);
222 static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
223                                         enum bfa_fcport_ln_sm_event event);
224 static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
225                                         enum bfa_fcport_ln_sm_event event);
226
227 static struct bfa_sm_table_s hal_port_sm_table[] = {
228         {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
229         {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
230         {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
231         {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
232         {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
233         {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
234         {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
235         {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
236         {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
237         {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
238         {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
239         {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
240         {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
241         {BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
242         {BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
243 };
244
245
246 /*
247  * forward declaration for RPORT related functions
248  */
249 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
250 static void             bfa_rport_free(struct bfa_rport_s *rport);
251 static bfa_boolean_t    bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
252 static bfa_boolean_t    bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
253 static bfa_boolean_t    bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
254 static void             __bfa_cb_rport_online(void *cbarg,
255                                                 bfa_boolean_t complete);
256 static void             __bfa_cb_rport_offline(void *cbarg,
257                                                 bfa_boolean_t complete);
258
259 /*
260  * forward declaration for RPORT state machine
261  */
262 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
263                                         enum bfa_rport_event event);
264 static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
265                                         enum bfa_rport_event event);
266 static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
267                                         enum bfa_rport_event event);
268 static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
269                                         enum bfa_rport_event event);
270 static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
271                                         enum bfa_rport_event event);
272 static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
273                                         enum bfa_rport_event event);
274 static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
275                                         enum bfa_rport_event event);
276 static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
277                                         enum bfa_rport_event event);
278 static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
279                                         enum bfa_rport_event event);
280 static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
281                                         enum bfa_rport_event event);
282 static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
283                                         enum bfa_rport_event event);
284 static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
285                                         enum bfa_rport_event event);
286 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
287                                         enum bfa_rport_event event);
288
289 /*
290  * PLOG related definitions
291  */
/*
 * Validate a port-log record before it is copied into the ring buffer.
 * Returns 1 if the record is malformed, 0 if it is acceptable.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	/* only INT and STRING record types are accepted */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	/*
	 * NOTE(review): this test fires only for non-INT (i.e. STRING)
	 * records, so log_num_ints of INT records is never bounds-checked
	 * here.  bfa_plog_intarr() clamps num_ints before logging, so the
	 * INT path is still safe, but confirm whether '==' was intended.
	 */
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
305
306 static u64
307 bfa_get_log_time(void)
308 {
309         u64 system_time = 0;
310         struct timeval tv;
311         do_gettimeofday(&tv);
312
313         /* We are interested in seconds only. */
314         system_time = tv.tv_sec;
315         return system_time;
316 }
317
318 static void
319 bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
320 {
321         u16 tail;
322         struct bfa_plog_rec_s *pl_recp;
323
324         if (plog->plog_enabled == 0)
325                 return;
326
327         if (plkd_validate_logrec(pl_rec)) {
328                 WARN_ON(1);
329                 return;
330         }
331
332         tail = plog->tail;
333
334         pl_recp = &(plog->plog_recs[tail]);
335
336         memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
337
338         pl_recp->tv = bfa_get_log_time();
339         BFA_PL_LOG_REC_INCR(plog->tail);
340
341         if (plog->head == plog->tail)
342                 BFA_PL_LOG_REC_INCR(plog->head);
343 }
344
345 void
346 bfa_plog_init(struct bfa_plog_s *plog)
347 {
348         memset((char *)plog, 0, sizeof(struct bfa_plog_s));
349
350         memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
351         plog->head = plog->tail = 0;
352         plog->plog_enabled = 1;
353 }
354
355 void
356 bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
357                 enum bfa_plog_eid event,
358                 u16 misc, char *log_str)
359 {
360         struct bfa_plog_rec_s  lp;
361
362         if (plog->plog_enabled) {
363                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
364                 lp.mid = mid;
365                 lp.eid = event;
366                 lp.log_type = BFA_PL_LOG_TYPE_STRING;
367                 lp.misc = misc;
368                 strncpy(lp.log_entry.string_log, log_str,
369                         BFA_PL_STRING_LOG_SZ - 1);
370                 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
371                 bfa_plog_add(plog, &lp);
372         }
373 }
374
375 void
376 bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
377                 enum bfa_plog_eid event,
378                 u16 misc, u32 *intarr, u32 num_ints)
379 {
380         struct bfa_plog_rec_s  lp;
381         u32 i;
382
383         if (num_ints > BFA_PL_INT_LOG_SZ)
384                 num_ints = BFA_PL_INT_LOG_SZ;
385
386         if (plog->plog_enabled) {
387                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
388                 lp.mid = mid;
389                 lp.eid = event;
390                 lp.log_type = BFA_PL_LOG_TYPE_INT;
391                 lp.misc = misc;
392
393                 for (i = 0; i < num_ints; i++)
394                         lp.log_entry.int_log[i] = intarr[i];
395
396                 lp.log_num_ints = (u8) num_ints;
397
398                 bfa_plog_add(plog, &lp);
399         }
400 }
401
402 void
403 bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
404                         enum bfa_plog_eid event,
405                         u16 misc, struct fchs_s *fchdr)
406 {
407         struct bfa_plog_rec_s  lp;
408         u32     *tmp_int = (u32 *) fchdr;
409         u32     ints[BFA_PL_INT_LOG_SZ];
410
411         if (plog->plog_enabled) {
412                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
413
414                 ints[0] = tmp_int[0];
415                 ints[1] = tmp_int[1];
416                 ints[2] = tmp_int[4];
417
418                 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
419         }
420 }
421
422 void
423 bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
424                       enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
425                       u32 pld_w0)
426 {
427         struct bfa_plog_rec_s  lp;
428         u32     *tmp_int = (u32 *) fchdr;
429         u32     ints[BFA_PL_INT_LOG_SZ];
430
431         if (plog->plog_enabled) {
432                 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
433
434                 ints[0] = tmp_int[0];
435                 ints[1] = tmp_int[1];
436                 ints[2] = tmp_int[4];
437                 ints[3] = pld_w0;
438
439                 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
440         }
441 }
442
443
444 /*
445  *  fcxp_pvt BFA FCXP private functions
446  */
447
448 static void
449 claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
450 {
451         u16     i;
452         struct bfa_fcxp_s *fcxp;
453
454         fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
455         memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
456
457         INIT_LIST_HEAD(&mod->fcxp_req_free_q);
458         INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
459         INIT_LIST_HEAD(&mod->fcxp_active_q);
460         INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
461         INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
462
463         mod->fcxp_list = fcxp;
464
465         for (i = 0; i < mod->num_fcxps; i++) {
466                 fcxp->fcxp_mod = mod;
467                 fcxp->fcxp_tag = i;
468
469                 if (i < (mod->num_fcxps / 2)) {
470                         list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
471                         fcxp->req_rsp = BFA_TRUE;
472                 } else {
473                         list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
474                         fcxp->req_rsp = BFA_FALSE;
475                 }
476
477                 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
478                 fcxp->reqq_waiting = BFA_FALSE;
479
480                 fcxp = fcxp + 1;
481         }
482
483         bfa_mem_kva_curp(mod) = (void *)fcxp;
484 }
485
/*
 * Report the DMA and KVA memory the FCXP module needs for the configured
 * number of exchanges.  DMA payload buffers are distributed across the
 * module's DMA segments; KVA holds the bfa_fcxp_s array itself.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/* min-config: two small (inline) buffers per fcxp; otherwise one
	 * small request buffer plus one large response buffer */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* fill each segment with as many fcxps as fit; the final segment
	 * takes whatever remains */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
523
524 static void
525 bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
526                 struct bfa_pcidev_s *pcidev)
527 {
528         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
529
530         mod->bfa = bfa;
531         mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
532
533         /*
534          * Initialize FCXP request and response payload sizes.
535          */
536         mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
537         if (!cfg->drvcfg.min_cfg)
538                 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
539
540         INIT_LIST_HEAD(&mod->req_wait_q);
541         INIT_LIST_HEAD(&mod->rsp_wait_q);
542
543         claim_fcxps_mem(mod);
544 }
545
/* FCXP module detach hook: nothing to tear down. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
550
/* FCXP module start hook: no per-start work required. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
555
/* FCXP module stop hook: no per-stop work required. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
560
561 static void
562 bfa_fcxp_iocdisable(struct bfa_s *bfa)
563 {
564         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
565         struct bfa_fcxp_s *fcxp;
566         struct list_head              *qe, *qen;
567
568         /* Enqueue unused fcxp resources to free_q */
569         list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
570         list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
571
572         list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
573                 fcxp = (struct bfa_fcxp_s *) qe;
574                 if (fcxp->caller == NULL) {
575                         fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
576                                         BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
577                         bfa_fcxp_free(fcxp);
578                 } else {
579                         fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
580                         bfa_cb_queue(bfa, &fcxp->hcb_qe,
581                                      __bfa_fcxp_send_cbfn, fcxp);
582                 }
583         }
584 }
585
586 static struct bfa_fcxp_s *
587 bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
588 {
589         struct bfa_fcxp_s *fcxp;
590
591         if (req)
592                 bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
593         else
594                 bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
595
596         if (fcxp)
597                 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
598
599         return fcxp;
600 }
601
602 static void
603 bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
604                struct bfa_s *bfa,
605                u8 *use_ibuf,
606                u32 *nr_sgles,
607                bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
608                bfa_fcxp_get_sglen_t *r_sglen_cbfn,
609                struct list_head *r_sgpg_q,
610                int n_sgles,
611                bfa_fcxp_get_sgaddr_t sga_cbfn,
612                bfa_fcxp_get_sglen_t sglen_cbfn)
613 {
614
615         WARN_ON(bfa == NULL);
616
617         bfa_trc(bfa, fcxp->fcxp_tag);
618
619         if (n_sgles == 0) {
620                 *use_ibuf = 1;
621         } else {
622                 WARN_ON(*sga_cbfn == NULL);
623                 WARN_ON(*sglen_cbfn == NULL);
624
625                 *use_ibuf = 0;
626                 *r_sga_cbfn = sga_cbfn;
627                 *r_sglen_cbfn = sglen_cbfn;
628
629                 *nr_sgles = n_sgles;
630
631                 /*
632                  * alloc required sgpgs
633                  */
634                 if (n_sgles > BFI_SGE_INLINE)
635                         WARN_ON(1);
636         }
637
638 }
639
/*
 * Initialize a newly allocated fcxp: record the caller context and set up
 * the request and response directions via bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
666
667 static void
668 bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
669 {
670         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
671         struct bfa_fcxp_wqe_s *wqe;
672
673         if (fcxp->req_rsp)
674                 bfa_q_deq(&mod->req_wait_q, &wqe);
675         else
676                 bfa_q_deq(&mod->rsp_wait_q, &wqe);
677
678         if (wqe) {
679                 bfa_trc(mod->bfa, fcxp->fcxp_tag);
680
681                 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
682                         wqe->nrsp_sgles, wqe->req_sga_cbfn,
683                         wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
684                         wqe->rsp_sglen_cbfn);
685
686                 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
687                 return;
688         }
689
690         WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
691         list_del(&fcxp->qe);
692
693         if (fcxp->req_rsp)
694                 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
695         else
696                 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
697 }
698
/*
 * No-op completion callback installed for discarded exchanges so that a
 * late firmware response has a valid (do-nothing) handler to land in.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
706
707 static void
708 __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
709 {
710         struct bfa_fcxp_s *fcxp = cbarg;
711
712         if (complete) {
713                 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
714                                 fcxp->rsp_status, fcxp->rsp_len,
715                                 fcxp->residue_len, &fcxp->rsp_fchs);
716         } else {
717                 bfa_fcxp_free(fcxp);
718         }
719 }
720
/*
 * Firmware completion handler for an FCXP send.  Converts the response
 * fields from wire (big-endian) order, logs the received frame, then
 * completes the exchange: synchronously when there is no caller context,
 * otherwise deferred through the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *       is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			/* no caller context: complete and free immediately */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* stash the response on the fcxp and defer the
			 * completion to __bfa_fcxp_send_cbfn */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
770
771 static void
772 hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
773                  struct fchs_s *fchs)
774 {
775         /*
776          * TODO: TX ox_id
777          */
778         if (reqlen > 0) {
779                 if (fcxp->use_ireqbuf) {
780                         u32     pld_w0 =
781                                 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
782
783                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
784                                         BFA_PL_EID_TX,
785                                         reqlen + sizeof(struct fchs_s), fchs,
786                                         pld_w0);
787                 } else {
788                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
789                                         BFA_PL_EID_TX,
790                                         reqlen + sizeof(struct fchs_s),
791                                         fchs);
792                 }
793         } else {
794                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
795                                reqlen + sizeof(struct fchs_s), fchs);
796         }
797 }
798
799 static void
800 hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
801                  struct bfi_fcxp_send_rsp_s *fcxp_rsp)
802 {
803         if (fcxp_rsp->rsp_len > 0) {
804                 if (fcxp->use_irspbuf) {
805                         u32     pld_w0 =
806                                 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
807
808                         bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
809                                               BFA_PL_EID_RX,
810                                               (u16) fcxp_rsp->rsp_len,
811                                               &fcxp_rsp->fchs, pld_w0);
812                 } else {
813                         bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
814                                        BFA_PL_EID_RX,
815                                        (u16) fcxp_rsp->rsp_len,
816                                        &fcxp_rsp->fchs);
817                 }
818         } else {
819                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
820                                (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
821         }
822 }
823
824 /*
 * Handler to resume sending fcxp when space is available in the cpe queue.
826  */
827 static void
828 bfa_fcxp_qresume(void *cbarg)
829 {
830         struct bfa_fcxp_s               *fcxp = cbarg;
831         struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
832         struct bfi_fcxp_send_req_s      *send_req;
833
834         fcxp->reqq_waiting = BFA_FALSE;
835         send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
836         bfa_fcxp_queue(fcxp, send_req);
837 }
838
839 /*
 * Queue fcxp send request to firmware.
841  */
842 static void
843 bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
844 {
845         struct bfa_s                    *bfa = fcxp->fcxp_mod->bfa;
846         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
847         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
848         struct bfa_rport_s              *rport = reqi->bfa_rport;
849
850         bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
851                     bfa_fn_lpu(bfa));
852
853         send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
854         if (rport) {
855                 send_req->rport_fw_hndl = rport->fw_handle;
856                 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
857                 if (send_req->max_frmsz == 0)
858                         send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
859         } else {
860                 send_req->rport_fw_hndl = 0;
861                 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
862         }
863
864         send_req->vf_id = cpu_to_be16(reqi->vf_id);
865         send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
866         send_req->class = reqi->class;
867         send_req->rsp_timeout = rspi->rsp_timeout;
868         send_req->cts = reqi->cts;
869         send_req->fchs = reqi->fchs;
870
871         send_req->req_len = cpu_to_be32(reqi->req_tot_len);
872         send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
873
874         /*
875          * setup req sgles
876          */
877         if (fcxp->use_ireqbuf == 1) {
878                 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
879                                         BFA_FCXP_REQ_PLD_PA(fcxp));
880         } else {
881                 if (fcxp->nreq_sgles > 0) {
882                         WARN_ON(fcxp->nreq_sgles != 1);
883                         bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
884                                 fcxp->req_sga_cbfn(fcxp->caller, 0));
885                 } else {
886                         WARN_ON(reqi->req_tot_len != 0);
887                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
888                 }
889         }
890
891         /*
892          * setup rsp sgles
893          */
894         if (fcxp->use_irspbuf == 1) {
895                 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
896
897                 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
898                                         BFA_FCXP_RSP_PLD_PA(fcxp));
899         } else {
900                 if (fcxp->nrsp_sgles > 0) {
901                         WARN_ON(fcxp->nrsp_sgles != 1);
902                         bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
903                                 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
904
905                 } else {
906                         WARN_ON(rspi->rsp_maxlen != 0);
907                         bfa_alen_set(&send_req->rsp_alen, 0, 0);
908                 }
909         }
910
911         hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
912
913         bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
914
915         bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
916         bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
917 }
918
919 /*
920  * Allocate an FCXP instance to send a response or to send a request
921  * that has a response. Request/response buffers are allocated by caller.
922  *
923  * @param[in]   bfa             BFA bfa instance
924  * @param[in]   nreq_sgles      Number of SG elements required for request
925  *                              buffer. 0, if fcxp internal buffers are used.
926  *                              Use bfa_fcxp_get_reqbuf() to get the
927  *                              internal req buffer.
928  * @param[in]   req_sgles       SG elements describing request buffer. Will be
929  *                              copied in by BFA and hence can be freed on
930  *                              return from this function.
931  * @param[in]   get_req_sga     function ptr to be called to get a request SG
932  *                              Address (given the sge index).
933  * @param[in]   get_req_sglen   function ptr to be called to get a request SG
934  *                              len (given the sge index).
935  * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
936  *                              Address (given the sge index).
937  * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
938  *                              len (given the sge index).
939  * @param[in]   req             Allocated FCXP is used to send req or rsp?
940  *                              request - BFA_TRUE, response - BFA_FALSE
941  *
942  * @return FCXP instance. NULL on failure.
943  */
944 struct bfa_fcxp_s *
945 bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
946                 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
947                 bfa_fcxp_get_sglen_t req_sglen_cbfn,
948                 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
949                 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
950 {
951         struct bfa_fcxp_s *fcxp = NULL;
952
953         WARN_ON(bfa == NULL);
954
955         fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
956         if (fcxp == NULL)
957                 return NULL;
958
959         bfa_trc(bfa, fcxp->fcxp_tag);
960
961         bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
962                         req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
963
964         return fcxp;
965 }
966
967 /*
968  * Get the internal request buffer pointer
969  *
970  * @param[in]   fcxp    BFA fcxp pointer
971  *
972  * @return              pointer to the internal request buffer
973  */
974 void *
975 bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
976 {
977         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
978         void    *reqbuf;
979
980         WARN_ON(fcxp->use_ireqbuf != 1);
981         reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
982                                 mod->req_pld_sz + mod->rsp_pld_sz);
983         return reqbuf;
984 }
985
986 u32
987 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
988 {
989         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
990
991         return mod->req_pld_sz;
992 }
993
994 /*
995  * Get the internal response buffer pointer
996  *
997  * @param[in]   fcxp    BFA fcxp pointer
998  *
 * @return              pointer to the internal response buffer
1000  */
1001 void *
1002 bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1003 {
1004         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1005         void    *fcxp_buf;
1006
1007         WARN_ON(fcxp->use_irspbuf != 1);
1008
1009         fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
1010                                 mod->req_pld_sz + mod->rsp_pld_sz);
1011
1012         /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
1013         return ((u8 *) fcxp_buf) + mod->req_pld_sz;
1014 }
1015
1016 /*
1017  * Free the BFA FCXP
1018  *
1019  * @param[in]   fcxp                    BFA fcxp pointer
1020  *
1021  * @return              void
1022  */
1023 void
1024 bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1025 {
1026         struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1027
1028         WARN_ON(fcxp == NULL);
1029         bfa_trc(mod->bfa, fcxp->fcxp_tag);
1030         bfa_fcxp_put(fcxp);
1031 }
1032
1033 /*
1034  * Send a FCXP request
1035  *
1036  * @param[in]   fcxp    BFA fcxp pointer
1037  * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
1038  * @param[in]   vf_id   virtual Fabric ID
1039  * @param[in]   lp_tag  lport tag
1040  * @param[in]   cts     use Continuous sequence
1041  * @param[in]   cos     fc Class of Service
1042  * @param[in]   reqlen  request length, does not include FCHS length
1043  * @param[in]   fchs    fc Header Pointer. The header content will be copied
1044  *                      in by BFA.
1045  *
1046  * @param[in]   cbfn    call back function to be called on receiving
1047  *                                                              the response
1048  * @param[in]   cbarg   arg for cbfn
1049  * @param[in]   rsp_timeout
1050  *                      response timeout
1051  *
 * @return              void
1053  */
1054 void
1055 bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1056               u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1057               u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1058               void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1059 {
1060         struct bfa_s                    *bfa  = fcxp->fcxp_mod->bfa;
1061         struct bfa_fcxp_req_info_s      *reqi = &fcxp->req_info;
1062         struct bfa_fcxp_rsp_info_s      *rspi = &fcxp->rsp_info;
1063         struct bfi_fcxp_send_req_s      *send_req;
1064
1065         bfa_trc(bfa, fcxp->fcxp_tag);
1066
1067         /*
1068          * setup request/response info
1069          */
1070         reqi->bfa_rport = rport;
1071         reqi->vf_id = vf_id;
1072         reqi->lp_tag = lp_tag;
1073         reqi->class = cos;
1074         rspi->rsp_timeout = rsp_timeout;
1075         reqi->cts = cts;
1076         reqi->fchs = *fchs;
1077         reqi->req_tot_len = reqlen;
1078         rspi->rsp_maxlen = rsp_maxlen;
1079         fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1080         fcxp->send_cbarg = cbarg;
1081
1082         /*
1083          * If no room in CPE queue, wait for space in request queue
1084          */
1085         send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1086         if (!send_req) {
1087                 bfa_trc(bfa, fcxp->fcxp_tag);
1088                 fcxp->reqq_waiting = BFA_TRUE;
1089                 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1090                 return;
1091         }
1092
1093         bfa_fcxp_queue(fcxp, send_req);
1094 }
1095
1096 /*
1097  * Abort a BFA FCXP
1098  *
1099  * @param[in]   fcxp    BFA fcxp pointer
1100  *
 * @return              bfa_status_t (always BFA_STATUS_OK; abort is not implemented)
1102  */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* abort is not implemented: trace the tag, warn, and report success */
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1110
1111 void
1112 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1113                bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1114                void *caller, int nreq_sgles,
1115                int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1116                bfa_fcxp_get_sglen_t req_sglen_cbfn,
1117                bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1118                bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1119 {
1120         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1121
1122         if (req)
1123                 WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1124         else
1125                 WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1126
1127         wqe->alloc_cbfn = alloc_cbfn;
1128         wqe->alloc_cbarg = alloc_cbarg;
1129         wqe->caller = caller;
1130         wqe->bfa = bfa;
1131         wqe->nreq_sgles = nreq_sgles;
1132         wqe->nrsp_sgles = nrsp_sgles;
1133         wqe->req_sga_cbfn = req_sga_cbfn;
1134         wqe->req_sglen_cbfn = req_sglen_cbfn;
1135         wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1136         wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1137
1138         if (req)
1139                 list_add_tail(&wqe->qe, &mod->req_wait_q);
1140         else
1141                 list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1142 }
1143
1144 void
1145 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1146 {
1147         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1148
1149         WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1150                 !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1151         list_del(&wqe->qe);
1152 }
1153
1154 void
1155 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1156 {
1157         /*
1158          * If waiting for room in request queue, cancel reqq wait
1159          * and free fcxp.
1160          */
1161         if (fcxp->reqq_waiting) {
1162                 fcxp->reqq_waiting = BFA_FALSE;
1163                 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1164                 bfa_fcxp_free(fcxp);
1165                 return;
1166         }
1167
1168         fcxp->send_cbfn = bfa_fcxp_null_comp;
1169 }
1170
1171 void
1172 bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1173 {
1174         switch (msg->mhdr.msg_id) {
1175         case BFI_FCXP_I2H_SEND_RSP:
1176                 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1177                 break;
1178
1179         default:
1180                 bfa_trc(bfa, msg->mhdr.msg_id);
1181                 WARN_ON(1);
1182         }
1183 }
1184
1185 u32
1186 bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1187 {
1188         struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1189
1190         return mod->rsp_pld_sz;
1191 }
1192
1193 void
1194 bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1195 {
1196         struct bfa_fcxp_mod_s   *mod = BFA_FCXP_MOD(bfa);
1197         struct list_head        *qe;
1198         int     i;
1199
1200         for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1201                 if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1202                         bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1203                         list_add_tail(qe, &mod->fcxp_req_unused_q);
1204                 } else {
1205                         bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1206                         list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1207                 }
1208         }
1209 }
1210
1211 /*
1212  *  BFA LPS state machine functions
1213  */
1214
1215 /*
1216  * Init state -- no login
1217  */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* queue full: wait for reqq space before sending the login */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		/* port-log the request type (fabric discovery vs. fabric login) */
		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* not logged in: complete the logout immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* return the lps to the module free pool */
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* nothing to do when not logged in */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1272
1273 /*
1274  * login is in progress -- awaiting response from firmware
1275  */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			/* login accepted: go online */
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* login rejected or timed out: back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* notify the login originator in either case */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* response still pending: only trace; PID is sent once online */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1326
1327 /*
1328  * login pending - awaiting space in request queue
1329  */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space available: send the deferred login now */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the wait and release the reqq wait element */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1360
1361 /*
1362  * login complete
1363  */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* queue full: wait for reqq space before sending the logout */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* fabric cleared the virtual link: back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* queue full: wait for reqq space before sending the N2N PID */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1409
1410 /*
 * login complete -- waiting for request queue space to send the N2N PID
1412  */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space available: go back online and send the N2N PID */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/* the queued wait element is reused; logowait sends the
		 * logout when the reqq resume fires */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* virtual link cleared: cancel the wait and go to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1451
1452 /*
1453  * logout in progress - awaiting firmware response
1454  */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* logout finished (or port went offline): back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* deleted mid-logout: no completion notification */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1476
1477 /*
1478  * logout pending -- awaiting space in request queue
1479  */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space available: send the deferred logout now */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* abandon the wait and release the reqq wait element */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1502
1503
1504
1505 /*
1506  *  lps_pvt BFA LPS private functions
1507  */
1508
1509 /*
1510  * return memory requirement
1511  */
1512 static void
1513 bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1514                 struct bfa_s *bfa)
1515 {
1516         struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1517
1518         if (cfg->drvcfg.min_cfg)
1519                 bfa_mem_kva_setup(minfo, lps_kva,
1520                         sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1521         else
1522                 bfa_mem_kva_setup(minfo, lps_kva,
1523                         sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1524 }
1525
1526 /*
1527  * bfa module attach at initialization time
1528  */
1529 static void
1530 bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1531         struct bfa_pcidev_s *pcidev)
1532 {
1533         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1534         struct bfa_lps_s        *lps;
1535         int                     i;
1536
1537         mod->num_lps = BFA_LPS_MAX_LPORTS;
1538         if (cfg->drvcfg.min_cfg)
1539                 mod->num_lps = BFA_LPS_MIN_LPORTS;
1540         else
1541                 mod->num_lps = BFA_LPS_MAX_LPORTS;
1542         mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1543
1544         bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1545
1546         INIT_LIST_HEAD(&mod->lps_free_q);
1547         INIT_LIST_HEAD(&mod->lps_active_q);
1548         INIT_LIST_HEAD(&mod->lps_login_q);
1549
1550         for (i = 0; i < mod->num_lps; i++, lps++) {
1551                 lps->bfa        = bfa;
1552                 lps->bfa_tag    = (u8) i;
1553                 lps->reqq       = BFA_REQQ_LPS;
1554                 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1555                 list_add_tail(&lps->qe, &mod->lps_free_q);
1556         }
1557 }
1558
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* intentionally empty: required bfa module hook, nothing to tear down */
}
1563
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* intentionally empty: required bfa module hook, no start-time work */
}
1568
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* intentionally empty: required bfa module hook, no stop-time work */
}
1573
1574 /*
1575  * IOC in disabled state -- consider all lps offline
1576  */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/* take every active lport offline; the event handlers may requeue
	 * entries, hence the _safe iteration */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* same for lports whose login is still in flight */
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* whatever remains on the login queue joins the active queue */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1594
1595 /*
1596  * Firmware login response
1597  */
1598 static void
1599 bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1600 {
1601         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1602         struct bfa_lps_s        *lps;
1603
1604         WARN_ON(rsp->bfa_tag >= mod->num_lps);
1605         lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1606
1607         lps->status = rsp->status;
1608         switch (rsp->status) {
1609         case BFA_STATUS_OK:
1610                 lps->fw_tag     = rsp->fw_tag;
1611                 lps->fport      = rsp->f_port;
1612                 if (lps->fport)
1613                         lps->lp_pid = rsp->lp_pid;
1614                 lps->npiv_en    = rsp->npiv_en;
1615                 lps->pr_bbcred  = be16_to_cpu(rsp->bb_credit);
1616                 lps->pr_pwwn    = rsp->port_name;
1617                 lps->pr_nwwn    = rsp->node_name;
1618                 lps->auth_req   = rsp->auth_req;
1619                 lps->lp_mac     = rsp->lp_mac;
1620                 lps->brcd_switch = rsp->brcd_switch;
1621                 lps->fcf_mac    = rsp->fcf_mac;
1622
1623                 break;
1624
1625         case BFA_STATUS_FABRIC_RJT:
1626                 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1627                 lps->lsrjt_expl = rsp->lsrjt_expl;
1628
1629                 break;
1630
1631         case BFA_STATUS_EPROTOCOL:
1632                 lps->ext_status = rsp->ext_status;
1633
1634                 break;
1635
1636         case BFA_STATUS_VPORT_MAX:
1637                 if (rsp->ext_status)
1638                         bfa_lps_no_res(lps, rsp->ext_status);
1639                 break;
1640
1641         default:
1642                 /* Nothing to do with other status */
1643                 break;
1644         }
1645
1646         list_del(&lps->qe);
1647         list_add_tail(&lps->qe, &mod->lps_active_q);
1648         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1649 }
1650
1651 static void
1652 bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1653 {
1654         struct bfa_s            *bfa = first_lps->bfa;
1655         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1656         struct list_head        *qe, *qe_next;
1657         struct bfa_lps_s        *lps;
1658
1659         bfa_trc(bfa, count);
1660
1661         qe = bfa_q_next(first_lps);
1662
1663         while (count && qe) {
1664                 qe_next = bfa_q_next(qe);
1665                 lps = (struct bfa_lps_s *)qe;
1666                 bfa_trc(bfa, lps->bfa_tag);
1667                 lps->status = first_lps->status;
1668                 list_del(&lps->qe);
1669                 list_add_tail(&lps->qe, &mod->lps_active_q);
1670                 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1671                 qe = qe_next;
1672                 count--;
1673         }
1674 }
1675
1676 /*
1677  * Firmware logout response
1678  */
1679 static void
1680 bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1681 {
1682         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1683         struct bfa_lps_s        *lps;
1684
1685         WARN_ON(rsp->bfa_tag >= mod->num_lps);
1686         lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1687
1688         bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1689 }
1690
1691 /*
1692  * Firmware received a Clear virtual link request (for FCoE)
1693  */
1694 static void
1695 bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1696 {
1697         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1698         struct bfa_lps_s        *lps;
1699
1700         lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1701
1702         bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1703 }
1704
1705 /*
1706  * Space is available in request queue, resume queueing request to firmware.
1707  */
1708 static void
1709 bfa_lps_reqq_resume(void *lps_arg)
1710 {
1711         struct bfa_lps_s        *lps = lps_arg;
1712
1713         bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1714 }
1715
1716 /*
1717  * lps is freed -- triggered by vport delete
1718  */
1719 static void
1720 bfa_lps_free(struct bfa_lps_s *lps)
1721 {
1722         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1723
1724         lps->lp_pid = 0;
1725         list_del(&lps->qe);
1726         list_add_tail(&lps->qe, &mod->lps_free_q);
1727 }
1728
1729 /*
1730  * send login request to firmware
1731  */
1732 static void
1733 bfa_lps_send_login(struct bfa_lps_s *lps)
1734 {
1735         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(lps->bfa);
1736         struct bfi_lps_login_req_s      *m;
1737
1738         m = bfa_reqq_next(lps->bfa, lps->reqq);
1739         WARN_ON(!m);
1740
1741         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1742                 bfa_fn_lpu(lps->bfa));
1743
1744         m->bfa_tag      = lps->bfa_tag;
1745         m->alpa         = lps->alpa;
1746         m->pdu_size     = cpu_to_be16(lps->pdusz);
1747         m->pwwn         = lps->pwwn;
1748         m->nwwn         = lps->nwwn;
1749         m->fdisc        = lps->fdisc;
1750         m->auth_en      = lps->auth_en;
1751
1752         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1753         list_del(&lps->qe);
1754         list_add_tail(&lps->qe, &mod->lps_login_q);
1755 }
1756
1757 /*
1758  * send logout request to firmware
1759  */
1760 static void
1761 bfa_lps_send_logout(struct bfa_lps_s *lps)
1762 {
1763         struct bfi_lps_logout_req_s *m;
1764
1765         m = bfa_reqq_next(lps->bfa, lps->reqq);
1766         WARN_ON(!m);
1767
1768         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1769                 bfa_fn_lpu(lps->bfa));
1770
1771         m->fw_tag = lps->fw_tag;
1772         m->port_name = lps->pwwn;
1773         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1774 }
1775
1776 /*
1777  * send n2n pid set request to firmware
1778  */
1779 static void
1780 bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1781 {
1782         struct bfi_lps_n2n_pid_req_s *m;
1783
1784         m = bfa_reqq_next(lps->bfa, lps->reqq);
1785         WARN_ON(!m);
1786
1787         bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1788                 bfa_fn_lpu(lps->bfa));
1789
1790         m->fw_tag = lps->fw_tag;
1791         m->lp_pid = lps->lp_pid;
1792         bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1793 }
1794
1795 /*
1796  * Indirect login completion handler for non-fcs
1797  */
1798 static void
1799 bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1800 {
1801         struct bfa_lps_s *lps   = arg;
1802
1803         if (!complete)
1804                 return;
1805
1806         if (lps->fdisc)
1807                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1808         else
1809                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1810 }
1811
1812 /*
1813  * Login completion handler -- direct call for fcs, queue for others
1814  */
1815 static void
1816 bfa_lps_login_comp(struct bfa_lps_s *lps)
1817 {
1818         if (!lps->bfa->fcs) {
1819                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1820                         lps);
1821                 return;
1822         }
1823
1824         if (lps->fdisc)
1825                 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1826         else
1827                 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1828 }
1829
1830 /*
1831  * Indirect logout completion handler for non-fcs
1832  */
1833 static void
1834 bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1835 {
1836         struct bfa_lps_s *lps   = arg;
1837
1838         if (!complete)
1839                 return;
1840
1841         if (lps->fdisc)
1842                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1843         else
1844                 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1845 }
1846
1847 /*
1848  * Logout completion handler -- direct call for fcs, queue for others
1849  */
1850 static void
1851 bfa_lps_logout_comp(struct bfa_lps_s *lps)
1852 {
1853         if (!lps->bfa->fcs) {
1854                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1855                         lps);
1856                 return;
1857         }
1858         if (lps->fdisc)
1859                 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1860 }
1861
1862 /*
1863  * Clear virtual link completion handler for non-fcs
1864  */
1865 static void
1866 bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1867 {
1868         struct bfa_lps_s *lps   = arg;
1869
1870         if (!complete)
1871                 return;
1872
1873         /* Clear virtual link to base port will result in link down */
1874         if (lps->fdisc)
1875                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1876 }
1877
1878 /*
1879  * Received Clear virtual link event --direct call for fcs,
1880  * queue for others
1881  */
1882 static void
1883 bfa_lps_cvl_event(struct bfa_lps_s *lps)
1884 {
1885         if (!lps->bfa->fcs) {
1886                 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1887                         lps);
1888                 return;
1889         }
1890
1891         /* Clear virtual link to base port will result in link down */
1892         if (lps->fdisc)
1893                 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1894 }
1895
1896
1897
1898 /*
1899  *  lps_public BFA LPS public functions
1900  */
1901
1902 u32
1903 bfa_lps_get_max_vport(struct bfa_s *bfa)
1904 {
1905         if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1906                 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1907         else
1908                 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1909 }
1910
1911 /*
 * Allocate a lport service tag.
1913  */
1914 struct bfa_lps_s  *
1915 bfa_lps_alloc(struct bfa_s *bfa)
1916 {
1917         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1918         struct bfa_lps_s        *lps = NULL;
1919
1920         bfa_q_deq(&mod->lps_free_q, &lps);
1921
1922         if (lps == NULL)
1923                 return NULL;
1924
1925         list_add_tail(&lps->qe, &mod->lps_active_q);
1926
1927         bfa_sm_set_state(lps, bfa_lps_sm_init);
1928         return lps;
1929 }
1930
1931 /*
1932  * Free lport service tag. This can be called anytime after an alloc.
1933  * No need to wait for any pending login/logout completions.
1934  */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The SM's delete path returns the tag to the free list */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1940
1941 /*
1942  * Initiate a lport login.
1943  */
1944 void
1945 bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1946         wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1947 {
1948         lps->uarg       = uarg;
1949         lps->alpa       = alpa;
1950         lps->pdusz      = pdusz;
1951         lps->pwwn       = pwwn;
1952         lps->nwwn       = nwwn;
1953         lps->fdisc      = BFA_FALSE;
1954         lps->auth_en    = auth_en;
1955         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1956 }
1957
1958 /*
1959  * Initiate a lport fdisc login.
1960  */
1961 void
1962 bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1963         wwn_t nwwn)
1964 {
1965         lps->uarg       = uarg;
1966         lps->alpa       = 0;
1967         lps->pdusz      = pdusz;
1968         lps->pwwn       = pwwn;
1969         lps->nwwn       = nwwn;
1970         lps->fdisc      = BFA_TRUE;
1971         lps->auth_en    = BFA_FALSE;
1972         bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1973 }
1974
1975
1976 /*
 * Initiate a lport FDISC logout.
1978  */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The SM sends the LOGO request to firmware when possible */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1984
1985 u8
1986 bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1987 {
1988         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1989
1990         return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1991 }
1992
1993 /*
1994  * Return lport services tag given the pid
1995  */
1996 u8
1997 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1998 {
1999         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2000         struct bfa_lps_s        *lps;
2001         int                     i;
2002
2003         for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
2004                 if (lps->lp_pid == pid)
2005                         return lps->bfa_tag;
2006         }
2007
2008         /* Return base port tag anyway */
2009         return 0;
2010 }
2011
2012
2013 /*
2014  * return port id assigned to the base lport
2015  */
2016 u32
2017 bfa_lps_get_base_pid(struct bfa_s *bfa)
2018 {
2019         struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
2020
2021         return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
2022 }
2023
2024 /*
2025  * Set PID in case of n2n (which is assigned during PLOGI)
2026  */
2027 void
2028 bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2029 {
2030         bfa_trc(lps->bfa, lps->bfa_tag);
2031         bfa_trc(lps->bfa, n2n_pid);
2032
2033         lps->lp_pid = n2n_pid;
2034         bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2035 }
2036
2037 /*
2038  * LPS firmware message class handler.
2039  */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	/* Union view lets each case see the message as its concrete type */
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Dispatch by firmware message id to the matching handler */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unknown message id from firmware: trace and warn */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
2066
/*
 * Post an asynchronous event notification (AEN) for a port event.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	/*
	 * bfad_get_aen_entry is a macro that assigns aen_entry; a NULL
	 * result means no free AEN slot, so the event is dropped.
	 */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2084
2085 /*
2086  * FC PORT state machine functions
2087  */
/*
 * Uninitialized state: port has not been started yet; waits for the
 * BFA start sequence before sending an enable to firmware.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No reqq space; wait for QRESUME in qwait state */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC went down before the port ever started */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2135
/*
 * Enable requested but the request queue was full; waiting for queue
 * space (QRESUME) to send the enable message to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable now */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the queue-wait before stopping */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failure: cancel the queue-wait and mark ioc down */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: report disconnect and park the SM */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2199
/*
 * Enable request has been sent to firmware; waiting for the firmware
 * response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link still down): wait for linkup */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Link came up before/with the fw response: go online */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable; qwait variant if the request queue is full */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: report disconnect and park the SM */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2261
/*
 * Port is enabled but the link is down; waiting for linkup from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC mode (FCoE): log FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		/* Notify upper layers and post the online AEN */
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable; qwait variant if the request queue is full */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: report disconnect and park the SM */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2353
/*
 * Port is enabled and the link is up (online state).
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable; qwait variant if the request queue is full */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		/* Disabling an online port: take the link down first */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Expected offline vs. unexpected loss of connectivity */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Expected offline vs. unexpected loss of connectivity */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Expected offline vs. unexpected loss of connectivity */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: report disconnect and park the SM */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2452
/*
 * Disable requested but the request queue was full; waiting for queue
 * space (QRESUME) to send the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred disable now */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the queue-wait before stopping */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while disable is pending: disable then re-enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failure: cancel the queue-wait and mark ioc failed */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* FAA misconfiguration: report disconnect and park the SM */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2503
/*
 * Disable followed by enable were requested while waiting for request
 * queue space; on resume, a disable then an enable are both sent.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the pending disable, then immediately re-enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the queue-wait before stopping */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already pending; nothing more to do */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable: back to plain disable-qwait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failure: cancel the queue-wait and mark ioc failed */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2550
/*
 * A port-disable request has been sent to firmware; waiting for the
 * disable response (FWRSP).
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Re-enable requested while the disable is still in flight:
		 * queue the enable (or wait for queue space) and log/notify.
		 */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2605
/*
 * Port is administratively disabled; an explicit enable (or diagnostic
 * port enable) is required to bring it back.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Send enable to firmware, or wait for queue space. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* Enter diagnostic (D-Port) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		/* Enter dynamic D-Port mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2662
2663 static void
2664 bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2665                          enum bfa_fcport_sm_event event)
2666 {
2667         bfa_trc(fcport->bfa, event);
2668
2669         switch (event) {
2670         case BFA_FCPORT_SM_START:
2671                 if (bfa_fcport_send_enable(fcport))
2672                         bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2673                 else
2674                         bfa_sm_set_state(fcport,
2675                                          bfa_fcport_sm_enabling_qwait);
2676                 break;
2677
2678         default:
2679                 /*
2680                  * Ignore all other events.
2681                  */
2682                 ;
2683         }
2684 }
2685
2686 /*
2687  * Port is enabled. IOC is down/failed.
2688  */
2689 static void
2690 bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2691                          enum bfa_fcport_sm_event event)
2692 {
2693         bfa_trc(fcport->bfa, event);
2694
2695         switch (event) {
2696         case BFA_FCPORT_SM_START:
2697                 if (bfa_fcport_send_enable(fcport))
2698                         bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2699                 else
2700                         bfa_sm_set_state(fcport,
2701                                          bfa_fcport_sm_enabling_qwait);
2702                 break;
2703
2704         default:
2705                 /*
2706                  * Ignore all events.
2707                  */
2708                 ;
2709         }
2710 }
2711
2712 /*
2713  * Port is disabled. IOC is down/failed.
2714  */
2715 static void
2716 bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2717                          enum bfa_fcport_sm_event event)
2718 {
2719         bfa_trc(fcport->bfa, event);
2720
2721         switch (event) {
2722         case BFA_FCPORT_SM_START:
2723                 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2724                 break;
2725
2726         case BFA_FCPORT_SM_ENABLE:
2727                 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2728                 break;
2729
2730         default:
2731                 /*
2732                  * Ignore all events.
2733                  */
2734                 ;
2735         }
2736 }
2737
/*
 * Port is in diagnostic (D-Port) mode; normal enable/disable traffic
 * events are ignored until D-Port mode is switched off.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* D-Port mode turned off; back to plain disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2769
2770 static void
2771 bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
2772                         enum bfa_fcport_sm_event event)
2773 {
2774         bfa_trc(fcport->bfa, event);
2775
2776         switch (event) {
2777         case BFA_FCPORT_SM_DISABLE:
2778         case BFA_FCPORT_SM_DDPORTDISABLE:
2779                 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2780                 break;
2781
2782         case BFA_FCPORT_SM_DPORTENABLE:
2783         case BFA_FCPORT_SM_DPORTDISABLE:
2784         case BFA_FCPORT_SM_ENABLE:
2785         case BFA_FCPORT_SM_START:
2786                 /**
2787                  * Ignore event for a port that is ddport
2788                  */
2789                 break;
2790
2791         case BFA_FCPORT_SM_STOP:
2792                 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2793                 break;
2794
2795         case BFA_FCPORT_SM_HWFAIL:
2796                 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2797                 break;
2798
2799         default:
2800                 bfa_sm_fault(fcport->bfa, event);
2801         }
2802 }
2803
/*
 * Port is held down because of an FAA misconfiguration; only an explicit
 * disable, a stop, or an IOC failure moves the port out of this state.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Issue the disable, then tear down link state and log. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed: report link down before parking in iocdown. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2846
2847 /*
2848  * Link state is down
2849  */
2850 static void
2851 bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2852                 enum bfa_fcport_ln_sm_event event)
2853 {
2854         bfa_trc(ln->fcport->bfa, event);
2855
2856         switch (event) {
2857         case BFA_FCPORT_LN_SM_LINKUP:
2858                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2859                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2860                 break;
2861
2862         default:
2863                 bfa_sm_fault(ln->fcport->bfa, event);
2864         }
2865 }
2866
2867 /*
2868  * Link state is waiting for down notification
2869  */
2870 static void
2871 bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2872                 enum bfa_fcport_ln_sm_event event)
2873 {
2874         bfa_trc(ln->fcport->bfa, event);
2875
2876         switch (event) {
2877         case BFA_FCPORT_LN_SM_LINKUP:
2878                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2879                 break;
2880
2881         case BFA_FCPORT_LN_SM_NOTIFICATION:
2882                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2883                 break;
2884
2885         default:
2886                 bfa_sm_fault(ln->fcport->bfa, event);
2887         }
2888 }
2889
2890 /*
2891  * Link state is waiting for down notification and there is a pending up
2892  */
2893 static void
2894 bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2895                 enum bfa_fcport_ln_sm_event event)
2896 {
2897         bfa_trc(ln->fcport->bfa, event);
2898
2899         switch (event) {
2900         case BFA_FCPORT_LN_SM_LINKDOWN:
2901                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2902                 break;
2903
2904         case BFA_FCPORT_LN_SM_NOTIFICATION:
2905                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2906                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2907                 break;
2908
2909         default:
2910                 bfa_sm_fault(ln->fcport->bfa, event);
2911         }
2912 }
2913
2914 /*
2915  * Link state is up
2916  */
2917 static void
2918 bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2919                 enum bfa_fcport_ln_sm_event event)
2920 {
2921         bfa_trc(ln->fcport->bfa, event);
2922
2923         switch (event) {
2924         case BFA_FCPORT_LN_SM_LINKDOWN:
2925                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2926                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2927                 break;
2928
2929         default:
2930                 bfa_sm_fault(ln->fcport->bfa, event);
2931         }
2932 }
2933
2934 /*
2935  * Link state is waiting for up notification
2936  */
2937 static void
2938 bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2939                 enum bfa_fcport_ln_sm_event event)
2940 {
2941         bfa_trc(ln->fcport->bfa, event);
2942
2943         switch (event) {
2944         case BFA_FCPORT_LN_SM_LINKDOWN:
2945                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2946                 break;
2947
2948         case BFA_FCPORT_LN_SM_NOTIFICATION:
2949                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2950                 break;
2951
2952         default:
2953                 bfa_sm_fault(ln->fcport->bfa, event);
2954         }
2955 }
2956
2957 /*
2958  * Link state is waiting for up notification and there is a pending down
2959  */
2960 static void
2961 bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2962                 enum bfa_fcport_ln_sm_event event)
2963 {
2964         bfa_trc(ln->fcport->bfa, event);
2965
2966         switch (event) {
2967         case BFA_FCPORT_LN_SM_LINKUP:
2968                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2969                 break;
2970
2971         case BFA_FCPORT_LN_SM_NOTIFICATION:
2972                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2973                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2974                 break;
2975
2976         default:
2977                 bfa_sm_fault(ln->fcport->bfa, event);
2978         }
2979 }
2980
2981 /*
2982  * Link state is waiting for up notification and there are pending down and up
2983  */
2984 static void
2985 bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2986                         enum bfa_fcport_ln_sm_event event)
2987 {
2988         bfa_trc(ln->fcport->bfa, event);
2989
2990         switch (event) {
2991         case BFA_FCPORT_LN_SM_LINKDOWN:
2992                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2993                 break;
2994
2995         case BFA_FCPORT_LN_SM_NOTIFICATION:
2996                 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2997                 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2998                 break;
2999
3000         default:
3001                 bfa_sm_fault(ln->fcport->bfa, event);
3002         }
3003 }
3004
3005 static void
3006 __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
3007 {
3008         struct bfa_fcport_ln_s *ln = cbarg;
3009
3010         if (complete)
3011                 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
3012         else
3013                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
3014 }
3015
3016 /*
3017  * Send SCN notification to upper layers.
3018  * trunk - false if caller is fcport to ignore fcport event in trunked mode
3019  */
3020 static void
3021 bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
3022         bfa_boolean_t trunk)
3023 {
3024         if (fcport->cfg.trunked && !trunk)
3025                 return;
3026
3027         switch (event) {
3028         case BFA_PORT_LINKUP:
3029                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
3030                 break;
3031         case BFA_PORT_LINKDOWN:
3032                 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
3033                 break;
3034         default:
3035                 WARN_ON(1);
3036         }
3037 }
3038
3039 static void
3040 bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
3041 {
3042         struct bfa_fcport_s *fcport = ln->fcport;
3043
3044         if (fcport->bfa->fcs) {
3045                 fcport->event_cbfn(fcport->event_cbarg, event);
3046                 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
3047         } else {
3048                 ln->ln_event = event;
3049                 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
3050                         __bfa_cb_fcport_event, ln);
3051         }
3052 }
3053
/* DMA memory needed for the firmware port stats, cache-line rounded. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
							BFA_CACHELINE_SZ))
3056
3057 static void
3058 bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
3059                    struct bfa_s *bfa)
3060 {
3061         struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
3062
3063         bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
3064 }
3065
/*
 * Request-queue resume callback: space is available again, let the port
 * state machine retry its pending firmware request.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
3073
3074 static void
3075 bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
3076 {
3077         struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
3078
3079         fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
3080         fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
3081         fcport->stats = (union bfa_fcport_stats_u *)
3082                                 bfa_mem_dma_virt(fcport_dma);
3083 }
3084
/*
 * Memory initialization.
 *
 * One-time attach: claims stats DMA memory, puts both the port and
 * link-notify state machines in their initial states, and installs the
 * default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;
	fcport->stats_dma_ready = BFA_FALSE;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	/* Target-rate-limit default and QoS bandwidth split. */
	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

	fcport->fec_state = BFA_FEC_OFFLINE;

	/* Queues of callers waiting for stats get/clear completions. */
	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	/* Resume hook for when the request queue has space again. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
3132
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* Intentionally empty: fcport owns no resources to release here. */
}
3137
/*
 * Called when IOC is ready.
 * Kicks the port state machine with a START event.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
3146
/*
 * Called before IOC is stopped.
 * Stops the port state machine and disables trunking state.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
3156
3157 /*
3158  * Called when IOC failure is detected.
3159  */
3160 static void
3161 bfa_fcport_iocdisable(struct bfa_s *bfa)
3162 {
3163         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3164
3165         bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3166         bfa_trunk_iocdisable(bfa);
3167 }
3168
3169 /*
3170  * Update loop info in fcport for SCN online
3171  */
3172 static void
3173 bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3174                         struct bfa_fcport_loop_info_s *loop_info)
3175 {
3176         fcport->myalpa = loop_info->myalpa;
3177         fcport->alpabm_valid =
3178                         loop_info->alpabm_val;
3179         memcpy(fcport->alpabm.alpa_bm,
3180                         loop_info->alpabm.alpa_bm,
3181                         sizeof(struct fc_alpabm_s));
3182 }
3183
/*
 * Cache link attributes from a firmware link-state event: speed,
 * topology, QoS/BB-credit-recovery/FEC attributes, trunk state and the
 * FCoE VLAN.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* Loop topology carries loop info instead of vc_fcf attrs. */
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	/* BB credit recovery attributes only apply when configured on. */
	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3221
3222 static void
3223 bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3224 {
3225         fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3226         fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3227         fcport->fec_state = BFA_FEC_OFFLINE;
3228 }
3229
3230 /*
3231  * Send port enable message to firmware.
3232  */
3233 static bfa_boolean_t
3234 bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3235 {
3236         struct bfi_fcport_enable_req_s *m;
3237
3238         /*
3239          * Increment message tag before queue check, so that responses to old
3240          * requests are discarded.
3241          */
3242         fcport->msgtag++;
3243
3244         /*
3245          * check for room in queue to send request now
3246          */
3247         m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3248         if (!m) {
3249                 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3250                                                         &fcport->reqq_wait);
3251                 return BFA_FALSE;
3252         }
3253
3254         bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3255                         bfa_fn_lpu(fcport->bfa));
3256         m->nwwn = fcport->nwwn;
3257         m->pwwn = fcport->pwwn;
3258         m->port_cfg = fcport->cfg;
3259         m->msgtag = fcport->msgtag;
3260         m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3261          m->use_flash_cfg = fcport->use_flash_cfg;
3262         bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3263         bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3264         bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3265
3266         /*
3267          * queue I/O message to firmware
3268          */
3269         bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3270         return BFA_TRUE;
3271 }
3272
3273 /*
3274  * Send port disable message to firmware.
3275  */
3276 static  bfa_boolean_t
3277 bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3278 {
3279         struct bfi_fcport_req_s *m;
3280
3281         /*
3282          * Increment message tag before queue check, so that responses to old
3283          * requests are discarded.
3284          */
3285         fcport->msgtag++;
3286
3287         /*
3288          * check for room in queue to send request now
3289          */
3290         m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3291         if (!m) {
3292                 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3293                                                         &fcport->reqq_wait);
3294                 return BFA_FALSE;
3295         }
3296
3297         bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3298                         bfa_fn_lpu(fcport->bfa));
3299         m->msgtag = fcport->msgtag;
3300
3301         /*
3302          * queue I/O message to firmware
3303          */
3304         bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3305
3306         return BFA_TRUE;
3307 }
3308
3309 static void
3310 bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3311 {
3312         fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3313         fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
3314
3315         bfa_trc(fcport->bfa, fcport->pwwn);
3316         bfa_trc(fcport->bfa, fcport->nwwn);
3317 }
3318
3319 static void
3320 bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3321         struct bfa_qos_stats_s *s)
3322 {
3323         u32     *dip = (u32 *) d;
3324         __be32  *sip = (__be32 *) s;
3325         int             i;
3326
3327         /* Now swap the 32 bit fields */
3328         for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3329                 dip[i] = be32_to_cpu(sip[i]);
3330 }
3331
/*
 * Byte-swap the FCoE stats block.  The fields are treated as pairs of
 * 32-bit words (presumably 64-bit counters — the pairwise handling
 * below implies it; confirm against bfa_fcoe_stats_s): on big-endian
 * hosts each word is converted in place, on little-endian hosts the two
 * halves of each pair are also exchanged.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Also swap the high/low words of each pair. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3351
/*
 * Completion (or cancellation) of a stats-get request: drain the
 * pending queue, converting the firmware stats into host order for each
 * waiter, then reset the status for the next request.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		struct timeval tv;
		/* tv is only read below under the same status check. */
		if (fcport->stats_status == BFA_STATUS_OK)
			do_gettimeofday(&tv);

		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* Seconds since the last stats reset. */
					ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
				}
			}
			/* Complete the waiter with the overall status. */
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Cancelled: drop all waiters and reset status. */
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3390
/*
 * Stats-get request timed out: cancel any queued-wait, mark the status
 * as a timer error and complete the pending waiters with it.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		/* The request never made it to firmware; stop waiting. */
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
3406
/*
 * Queue a stats-get request to firmware; if the request queue is full,
 * arrange to be called again (same function) when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* Queue full: retry from the reqq-resume callback. */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3430
/*
 * Completion (or cancellation) of a stats-clear request: restamp the
 * stats-reset time and complete every queued waiter with the status.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Cancelled: drop all waiters and reset status. */
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3458
/*
 * Stats-clear request timed out: cancel any queued-wait, mark the
 * status as a timer error and complete the pending waiters with it.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		/* The request never made it to firmware; stop waiting. */
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
3474
/*
 * Post a BFI_FCPORT_H2I_STATS_CLEAR_REQ message to the firmware.
 * If the request queue is full, arm a wait so this function is
 * re-invoked when queue space frees up (stats_qfull tracks the wait).
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full: retry from the reqq-space callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3498
3499 /*
3500  * Handle trunk SCN event from firmware.
3501  */
3502 static void
3503 bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3504 {
3505         struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3506         struct bfi_fcport_trunk_link_s *tlink;
3507         struct bfa_trunk_link_attr_s *lattr;
3508         enum bfa_trunk_state state_prev;
3509         int i;
3510         int link_bm = 0;
3511
3512         bfa_trc(fcport->bfa, fcport->cfg.trunked);
3513         WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3514                    scn->trunk_state != BFA_TRUNK_OFFLINE);
3515
3516         bfa_trc(fcport->bfa, trunk->attr.state);
3517         bfa_trc(fcport->bfa, scn->trunk_state);
3518         bfa_trc(fcport->bfa, scn->trunk_speed);
3519
3520         /*
3521          * Save off new state for trunk attribute query
3522          */
3523         state_prev = trunk->attr.state;
3524         if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3525                 trunk->attr.state = scn->trunk_state;
3526         trunk->attr.speed = scn->trunk_speed;
3527         for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3528                 lattr = &trunk->attr.link_attr[i];
3529                 tlink = &scn->tlink[i];
3530
3531                 lattr->link_state = tlink->state;
3532                 lattr->trunk_wwn  = tlink->trunk_wwn;
3533                 lattr->fctl       = tlink->fctl;
3534                 lattr->speed      = tlink->speed;
3535                 lattr->deskew     = be32_to_cpu(tlink->deskew);
3536
3537                 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3538                         fcport->speed    = tlink->speed;
3539                         fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3540                         link_bm |= 1 << i;
3541                 }
3542
3543                 bfa_trc(fcport->bfa, lattr->link_state);
3544                 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3545                 bfa_trc(fcport->bfa, lattr->fctl);
3546                 bfa_trc(fcport->bfa, lattr->speed);
3547                 bfa_trc(fcport->bfa, lattr->deskew);
3548         }
3549
3550         switch (link_bm) {
3551         case 3:
3552                 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3553                         BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3554                 break;
3555         case 2:
3556                 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3557                         BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3558                 break;
3559         case 1:
3560                 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3561                         BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3562                 break;
3563         default:
3564                 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3565                         BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3566         }
3567
3568         /*
3569          * Notify upper layers if trunk state changed.
3570          */
3571         if ((state_prev != trunk->attr.state) ||
3572                 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3573                 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3574                         BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3575         }
3576 }
3577
3578 static void
3579 bfa_trunk_iocdisable(struct bfa_s *bfa)
3580 {
3581         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3582         int i = 0;
3583
3584         /*
3585          * In trunked mode, notify upper layers that link is down
3586          */
3587         if (fcport->cfg.trunked) {
3588                 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3589                         bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3590
3591                 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3592                 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3593                 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3594                         fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3595                         fcport->trunk.attr.link_attr[i].fctl =
3596                                                 BFA_TRUNK_LINK_FCTL_NORMAL;
3597                         fcport->trunk.attr.link_attr[i].link_state =
3598                                                 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3599                         fcport->trunk.attr.link_attr[i].speed =
3600                                                 BFA_PORT_SPEED_UNKNOWN;
3601                         fcport->trunk.attr.link_attr[i].deskew = 0;
3602                 }
3603         }
3604 }
3605
3606 /*
3607  * Called to initialize port attributes
3608  */
3609 void
3610 bfa_fcport_init(struct bfa_s *bfa)
3611 {
3612         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3613
3614         /*
3615          * Initialize port attributes from IOC hardware data.
3616          */
3617         bfa_fcport_set_wwns(fcport);
3618         if (fcport->cfg.maxfrsize == 0)
3619                 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3620         fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3621         fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3622
3623         if (bfa_fcport_is_pbcdisabled(bfa))
3624                 bfa->modules.port.pbc_disabled = BFA_TRUE;
3625
3626         WARN_ON(!fcport->cfg.maxfrsize);
3627         WARN_ON(!fcport->cfg.rx_bbcredit);
3628         WARN_ON(!fcport->speed_sup);
3629 }
3630
3631 /*
3632  * Firmware message handler.
3633  */
3634 void
3635 bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3636 {
3637         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3638         union bfi_fcport_i2h_msg_u i2hmsg;
3639
3640         i2hmsg.msg = msg;
3641         fcport->event_arg.i2hmsg = i2hmsg;
3642
3643         bfa_trc(bfa, msg->mhdr.msg_id);
3644         bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3645
3646         switch (msg->mhdr.msg_id) {
3647         case BFI_FCPORT_I2H_ENABLE_RSP:
3648                 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3649
3650                         fcport->stats_dma_ready = BFA_TRUE;
3651                         if (fcport->use_flash_cfg) {
3652                                 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3653                                 fcport->cfg.maxfrsize =
3654                                         cpu_to_be16(fcport->cfg.maxfrsize);
3655                                 fcport->cfg.path_tov =
3656                                         cpu_to_be16(fcport->cfg.path_tov);
3657                                 fcport->cfg.q_depth =
3658                                         cpu_to_be16(fcport->cfg.q_depth);
3659
3660                                 if (fcport->cfg.trunked)
3661                                         fcport->trunk.attr.state =
3662                                                 BFA_TRUNK_OFFLINE;
3663                                 else
3664                                         fcport->trunk.attr.state =
3665                                                 BFA_TRUNK_DISABLED;
3666                                 fcport->qos_attr.qos_bw =
3667                                         i2hmsg.penable_rsp->port_cfg.qos_bw;
3668                                 fcport->use_flash_cfg = BFA_FALSE;
3669                         }
3670
3671                         if (fcport->cfg.qos_enabled)
3672                                 fcport->qos_attr.state = BFA_QOS_OFFLINE;
3673                         else
3674                                 fcport->qos_attr.state = BFA_QOS_DISABLED;
3675
3676                         fcport->qos_attr.qos_bw_op =
3677                                         i2hmsg.penable_rsp->port_cfg.qos_bw;
3678
3679                         if (fcport->cfg.bb_cr_enabled)
3680                                 fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3681                         else
3682                                 fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3683
3684                         bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3685                 }
3686                 break;
3687
3688         case BFI_FCPORT_I2H_DISABLE_RSP:
3689                 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3690                         bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3691                 break;
3692
3693         case BFI_FCPORT_I2H_EVENT:
3694                 if (fcport->cfg.bb_cr_enabled)
3695                         fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3696                 else
3697                         fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3698
3699                 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3700                         bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3701                 else {
3702                         if (i2hmsg.event->link_state.linkstate_rsn ==
3703                             BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3704                                 bfa_sm_send_event(fcport,
3705                                                   BFA_FCPORT_SM_FAA_MISCONFIG);
3706                         else
3707                                 bfa_sm_send_event(fcport,
3708                                                   BFA_FCPORT_SM_LINKDOWN);
3709                 }
3710                 fcport->qos_attr.qos_bw_op =
3711                                 i2hmsg.event->link_state.qos_attr.qos_bw_op;
3712                 break;
3713
3714         case BFI_FCPORT_I2H_TRUNK_SCN:
3715                 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3716                 break;
3717
3718         case BFI_FCPORT_I2H_STATS_GET_RSP:
3719                 /*
3720                  * check for timer pop before processing the rsp
3721                  */
3722                 if (list_empty(&fcport->stats_pending_q) ||
3723                     (fcport->stats_status == BFA_STATUS_ETIMER))
3724                         break;
3725
3726                 bfa_timer_stop(&fcport->timer);
3727                 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3728                 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3729                 break;
3730
3731         case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3732                 /*
3733                  * check for timer pop before processing the rsp
3734                  */
3735                 if (list_empty(&fcport->statsclr_pending_q) ||
3736                     (fcport->stats_status == BFA_STATUS_ETIMER))
3737                         break;
3738
3739                 bfa_timer_stop(&fcport->timer);
3740                 fcport->stats_status = BFA_STATUS_OK;
3741                 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3742                 break;
3743
3744         case BFI_FCPORT_I2H_ENABLE_AEN:
3745                 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3746                 break;
3747
3748         case BFI_FCPORT_I2H_DISABLE_AEN:
3749                 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3750                 break;
3751
3752         default:
3753                 WARN_ON(1);
3754         break;
3755         }
3756 }
3757
3758 /*
3759  * Registered callback for port events.
3760  */
3761 void
3762 bfa_fcport_event_register(struct bfa_s *bfa,
3763                                 void (*cbfn) (void *cbarg,
3764                                 enum bfa_port_linkstate event),
3765                                 void *cbarg)
3766 {
3767         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3768
3769         fcport->event_cbfn = cbfn;
3770         fcport->event_cbarg = cbarg;
3771 }
3772
3773 bfa_status_t
3774 bfa_fcport_enable(struct bfa_s *bfa)
3775 {
3776         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3777
3778         if (bfa_fcport_is_pbcdisabled(bfa))
3779                 return BFA_STATUS_PBC;
3780
3781         if (bfa_ioc_is_disabled(&bfa->ioc))
3782                 return BFA_STATUS_IOC_DISABLED;
3783
3784         if (fcport->diag_busy)
3785                 return BFA_STATUS_DIAG_BUSY;
3786
3787         bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3788         return BFA_STATUS_OK;
3789 }
3790
/*
 * Request a port disable.  Refused while pre-boot config disables the
 * port or while the IOC itself is down.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3803
3804 /* If PBC is disabled on port, return error */
3805 bfa_status_t
3806 bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3807 {
3808         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3809         struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3810         struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3811
3812         if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3813                 bfa_trc(bfa, fcport->pwwn);
3814                 return BFA_STATUS_PBC;
3815         }
3816         return BFA_STATUS_OK;
3817 }
3818
3819 /*
3820  * Configure port speed.
3821  */
3822 bfa_status_t
3823 bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3824 {
3825         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3826
3827         bfa_trc(bfa, speed);
3828
3829         if (fcport->cfg.trunked == BFA_TRUE)
3830                 return BFA_STATUS_TRUNK_ENABLED;
3831         if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3832                         (speed == BFA_PORT_SPEED_16GBPS))
3833                 return BFA_STATUS_UNSUPP_SPEED;
3834         if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3835                 bfa_trc(bfa, fcport->speed_sup);
3836                 return BFA_STATUS_UNSUPP_SPEED;
3837         }
3838
3839         /* Port speed entered needs to be checked */
3840         if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3841                 /* For CT2, 1G is not supported */
3842                 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3843                     (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3844                         return BFA_STATUS_UNSUPP_SPEED;
3845
3846                 /* Already checked for Auto Speed and Max Speed supp */
3847                 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3848                       speed == BFA_PORT_SPEED_2GBPS ||
3849                       speed == BFA_PORT_SPEED_4GBPS ||
3850                       speed == BFA_PORT_SPEED_8GBPS ||
3851                       speed == BFA_PORT_SPEED_16GBPS ||
3852                       speed == BFA_PORT_SPEED_AUTO))
3853                         return BFA_STATUS_UNSUPP_SPEED;
3854         } else {
3855                 if (speed != BFA_PORT_SPEED_10GBPS)
3856                         return BFA_STATUS_UNSUPP_SPEED;
3857         }
3858
3859         fcport->cfg.speed = speed;
3860
3861         return BFA_STATUS_OK;
3862 }
3863
3864 /*
3865  * Get current speed.
3866  */
3867 enum bfa_port_speed
3868 bfa_fcport_get_speed(struct bfa_s *bfa)
3869 {
3870         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3871
3872         return fcport->speed;
3873 }
3874
3875 /*
3876  * Configure port topology.
3877  */
3878 bfa_status_t
3879 bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3880 {
3881         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3882
3883         bfa_trc(bfa, topology);
3884         bfa_trc(bfa, fcport->cfg.topology);
3885
3886         switch (topology) {
3887         case BFA_PORT_TOPOLOGY_P2P:
3888                 break;
3889
3890         case BFA_PORT_TOPOLOGY_LOOP:
3891                 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3892                         (fcport->qos_attr.state != BFA_QOS_DISABLED))
3893                         return BFA_STATUS_ERROR_QOS_ENABLED;
3894                 if (fcport->cfg.ratelimit != BFA_FALSE)
3895                         return BFA_STATUS_ERROR_TRL_ENABLED;
3896                 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3897                         (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3898                         return BFA_STATUS_ERROR_TRUNK_ENABLED;
3899                 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3900                         (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3901                         return BFA_STATUS_UNSUPP_SPEED;
3902                 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3903                         return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3904                 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3905                         return BFA_STATUS_DPORT_ERR;
3906                 if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3907                         return BFA_STATUS_DPORT_ERR;
3908                 break;
3909
3910         case BFA_PORT_TOPOLOGY_AUTO:
3911                 break;
3912
3913         default:
3914                 return BFA_STATUS_EINVAL;
3915         }
3916
3917         fcport->cfg.topology = topology;
3918         return BFA_STATUS_OK;
3919 }
3920
3921 /*
3922  * Get current topology.
3923  */
3924 enum bfa_port_topology
3925 bfa_fcport_get_topology(struct bfa_s *bfa)
3926 {
3927         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3928
3929         return fcport->topology;
3930 }
3931
3932 /**
3933  * Get config topology.
3934  */
3935 enum bfa_port_topology
3936 bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3937 {
3938         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3939
3940         return fcport->cfg.topology;
3941 }
3942
3943 bfa_status_t
3944 bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3945 {
3946         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3947
3948         bfa_trc(bfa, alpa);
3949         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3950         bfa_trc(bfa, fcport->cfg.hardalpa);
3951
3952         fcport->cfg.cfg_hardalpa = BFA_TRUE;
3953         fcport->cfg.hardalpa = alpa;
3954
3955         return BFA_STATUS_OK;
3956 }
3957
3958 bfa_status_t
3959 bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3960 {
3961         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3962
3963         bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3964         bfa_trc(bfa, fcport->cfg.hardalpa);
3965
3966         fcport->cfg.cfg_hardalpa = BFA_FALSE;
3967         return BFA_STATUS_OK;
3968 }
3969
3970 bfa_boolean_t
3971 bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3972 {
3973         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3974
3975         *alpa = fcport->cfg.hardalpa;
3976         return fcport->cfg.cfg_hardalpa;
3977 }
3978
3979 u8
3980 bfa_fcport_get_myalpa(struct bfa_s *bfa)
3981 {
3982         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3983
3984         return fcport->myalpa;
3985 }
3986
3987 bfa_status_t
3988 bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3989 {
3990         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3991
3992         bfa_trc(bfa, maxfrsize);
3993         bfa_trc(bfa, fcport->cfg.maxfrsize);
3994
3995         /* with in range */
3996         if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3997                 return BFA_STATUS_INVLD_DFSZ;
3998
3999         /* power of 2, if not the max frame size of 2112 */
4000         if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
4001                 return BFA_STATUS_INVLD_DFSZ;
4002
4003         fcport->cfg.maxfrsize = maxfrsize;
4004         return BFA_STATUS_OK;
4005 }
4006
4007 u16
4008 bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
4009 {
4010         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4011
4012         return fcport->cfg.maxfrsize;
4013 }
4014
4015 u8
4016 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
4017 {
4018         if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
4019                 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
4020
4021         else
4022                 return 0;
4023 }
4024
4025 void
4026 bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
4027 {
4028         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4029
4030         fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
4031 }
4032
4033 /*
4034  * Get port attributes.
4035  */
4036
4037 wwn_t
4038 bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
4039 {
4040         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4041         if (node)
4042                 return fcport->nwwn;
4043         else
4044                 return fcport->pwwn;
4045 }
4046
/*
 * Fill *attr with a snapshot of the port's attributes: identity
 * (WWNs), configuration, speed, topology, beacon and state, derived
 * from fcport, IOC and fcpim module data.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	/* path timeout and queue depth live in the fcpim module */
	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);

	attr->fec_state = fcport->fec_state;

	/* PBC Disabled State overrides the state machine's view */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
4096
4097 #define BFA_FCPORT_STATS_TOV    1000
4098
4099 /*
4100  * Fetch port statistics (FCQoS or FCoE).
4101  */
4102 bfa_status_t
4103 bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4104 {
4105         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4106
4107         if (!bfa_iocfc_is_operational(bfa) ||
4108             !fcport->stats_dma_ready)
4109                 return BFA_STATUS_IOC_NON_OP;
4110
4111         if (!list_empty(&fcport->statsclr_pending_q))
4112                 return BFA_STATUS_DEVBUSY;
4113
4114         if (list_empty(&fcport->stats_pending_q)) {
4115                 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4116                 bfa_fcport_send_stats_get(fcport);
4117                 bfa_timer_start(bfa, &fcport->timer,
4118                                 bfa_fcport_stats_get_timeout,
4119                                 fcport, BFA_FCPORT_STATS_TOV);
4120         } else
4121                 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
4122
4123         return BFA_STATUS_OK;
4124 }
4125
4126 /*
4127  * Reset port statistics (FCQoS or FCoE).
4128  */
4129 bfa_status_t
4130 bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
4131 {
4132         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4133
4134         if (!bfa_iocfc_is_operational(bfa) ||
4135             !fcport->stats_dma_ready)
4136                 return BFA_STATUS_IOC_NON_OP;
4137
4138         if (!list_empty(&fcport->stats_pending_q))
4139                 return BFA_STATUS_DEVBUSY;
4140
4141         if (list_empty(&fcport->statsclr_pending_q)) {
4142                 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4143                 bfa_fcport_send_stats_clear(fcport);
4144                 bfa_timer_start(bfa, &fcport->timer,
4145                                 bfa_fcport_stats_clr_timeout,
4146                                 fcport, BFA_FCPORT_STATS_TOV);
4147         } else
4148                 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
4149
4150         return BFA_STATUS_OK;
4151 }
4152
4153 /*
4154  * Fetch port attributes.
4155  */
4156 bfa_boolean_t
4157 bfa_fcport_is_disabled(struct bfa_s *bfa)
4158 {
4159         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4160
4161         return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4162                 BFA_PORT_ST_DISABLED;
4163
4164 }
4165
4166 bfa_boolean_t
4167 bfa_fcport_is_dport(struct bfa_s *bfa)
4168 {
4169         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4170
4171         return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4172                 BFA_PORT_ST_DPORT);
4173 }
4174
4175 bfa_boolean_t
4176 bfa_fcport_is_ddport(struct bfa_s *bfa)
4177 {
4178         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4179
4180         return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4181                 BFA_PORT_ST_DDPORT);
4182 }
4183
4184 bfa_status_t
4185 bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
4186 {
4187         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4188         enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
4189
4190         bfa_trc(bfa, ioc_type);
4191
4192         if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
4193                 return BFA_STATUS_QOS_BW_INVALID;
4194
4195         if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
4196                 return BFA_STATUS_QOS_BW_INVALID;
4197
4198         if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
4199             (qos_bw->low > qos_bw->high))
4200                 return BFA_STATUS_QOS_BW_INVALID;
4201
4202         if ((ioc_type == BFA_IOC_TYPE_FC) &&
4203             (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
4204                 fcport->cfg.qos_bw = *qos_bw;
4205
4206         return BFA_STATUS_OK;
4207 }
4208
4209 bfa_boolean_t
4210 bfa_fcport_is_ratelim(struct bfa_s *bfa)
4211 {
4212         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4213
4214         return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4215
4216 }
4217
4218 /*
4219  *      Enable/Disable FAA feature in port config
4220  */
4221 void
4222 bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4223 {
4224         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4225
4226         bfa_trc(bfa, state);
4227         fcport->cfg.faa_state = state;
4228 }
4229
4230 /*
4231  * Get default minimum ratelim speed
4232  */
4233 enum bfa_port_speed
4234 bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4235 {
4236         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4237
4238         bfa_trc(bfa, fcport->cfg.trl_def_speed);
4239         return fcport->cfg.trl_def_speed;
4240
4241 }
4242
4243 void
4244 bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4245                   bfa_boolean_t link_e2e_beacon)
4246 {
4247         struct bfa_s *bfa = dev;
4248         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4249
4250         bfa_trc(bfa, beacon);
4251         bfa_trc(bfa, link_e2e_beacon);
4252         bfa_trc(bfa, fcport->beacon);
4253         bfa_trc(bfa, fcport->link_e2e_beacon);
4254
4255         fcport->beacon = beacon;
4256         fcport->link_e2e_beacon = link_e2e_beacon;
4257 }
4258
4259 bfa_boolean_t
4260 bfa_fcport_is_linkup(struct bfa_s *bfa)
4261 {
4262         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4263
4264         return  (!fcport->cfg.trunked &&
4265                  bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4266                 (fcport->cfg.trunked &&
4267                  fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4268 }
4269
4270 bfa_boolean_t
4271 bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4272 {
4273         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4274
4275         return fcport->cfg.qos_enabled;
4276 }
4277
4278 bfa_boolean_t
4279 bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4280 {
4281         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4282
4283         return fcport->cfg.trunked;
4284 }
4285
/*
 * Enable or disable BB credit recovery (BBCR) and set its SC_N value.
 * BBCR is native-FC only, conflicts with loop topology, QoS and
 * trunking, and needs an 8G-or-faster adapter running at its top
 * supported speed.  An out-of-range bb_scn falls back to the default.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	/* mezzanine cards (except Chinook) do not support BBCR */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* port must run at the adapter's maximum supported speed */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		/* already on: only report whether bb_scn would change */
		if (fcport->cfg.bb_cr_enabled) {
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4339
/*
 * Fetch the current BB credit recovery (BBCR) attributes of the port.
 *
 * Fails when BBCR is not applicable: non-FC IOC types and loop topology.
 * On success *bbcr_attr receives a copy of the cached fcport attributes.
 */
bfa_status_t
bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
		struct bfa_bbcr_attr_s *bbcr_attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* BB credit recovery is a Fibre Channel-only feature */
	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	/* BBCR is not supported in loop topology */
	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
		return BFA_STATUS_TOPOLOGY_LOOP;

	*bbcr_attr = fcport->bbcr_attr;

	return BFA_STATUS_OK;
}
4356
void
bfa_fcport_dportenable(struct bfa_s *bfa)
{
	/*
	 * The caller is assumed to have verified that the port is in the
	 * disabled state; no validation is performed here.
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
}
4366
void
bfa_fcport_dportdisable(struct bfa_s *bfa)
{
	/*
	 * The caller is assumed to have verified that the port is in the
	 * disabled state; no validation is performed here.
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
}
4376
void
bfa_fcport_ddportenable(struct bfa_s *bfa)
{
	/*
	 * The caller is assumed to have verified that the port is in the
	 * disabled state; no validation is performed here.
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
}
4385
void
bfa_fcport_ddportdisable(struct bfa_s *bfa)
{
	/*
	 * The caller is assumed to have verified that the port is in the
	 * disabled state; no validation is performed here.
	 */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
}
4394
4395 /*
4396  * Rport State machine functions
4397  */
/*
 * Beginning state, only the rport create event is expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event in this state is a driver bug */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4418
/*
 * Rport is created; awaiting an online event to push it to firmware.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* queue f/w create; park in qfull state if reqq is full */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing was sent to firmware yet - free immediately */
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4450
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* cannot delete until create response arrives - defer */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* cannot offline until create response arrives - defer */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4487
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* reqq has space again - retry the deferred f/w create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create never reached f/w - cancel the wait and free */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4528
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* f/w rport must be torn down; wait in qfull state if the
		 * delete request could not be queued.
		 */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		/* best-effort; no state change on speed update */
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		/*
		 * NOTE(review): qos_attr is copied before the flow ids are
		 * byte-swapped below, so the cached copy keeps the wire
		 * (big-endian) flow id - confirm consumers expect that.
		 */
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4596
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* deletion already in flight; just retarget the end state */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4629
/*
 * Request queue is full, awaiting queue resume to send delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* reqq has space again - retry the deferred f/w delete */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4659
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* re-create the f/w rport; qfull state defers on full reqq */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* already offline - just acknowledge the caller */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4698
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC is gone; no response will come - free immediately */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4725
/*
 * Rport is deleted, waiting for room in the request queue to send the
 * f/w delete request.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* delete never reached f/w - cancel the wait and free */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4750
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed - now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4781
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed - now tear it down for the offline */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* escalate the pending offline to a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4817
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* nothing to undo in f/w; just acknowledge the caller */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered - attempt a fresh f/w create */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* repeated h/w failure notifications are ignored */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4855
4856
4857
4858 /*
4859  *  bfa_rport_private BFA rport private functions
4860  */
4861
4862 static void
4863 __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4864 {
4865         struct bfa_rport_s *rp = cbarg;
4866
4867         if (complete)
4868                 bfa_cb_rport_online(rp->rport_drv);
4869 }
4870
4871 static void
4872 __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4873 {
4874         struct bfa_rport_s *rp = cbarg;
4875
4876         if (complete)
4877                 bfa_cb_rport_offline(rp->rport_drv);
4878 }
4879
/*
 * Request-queue resume callback: the reqq has space again, so kick the
 * rport state machine to retry its deferred firmware request.
 */
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s	*rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
4887
/*
 * Compute KVA memory needed by the rport module: one bfa_rport_s per
 * configured rport, after enforcing the BFA_RPORT_MIN floor on the
 * firmware configuration.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4901
/*
 * Rport module attach: carve the rport array out of module KVA memory
 * and initialize every rport into the uninit state on the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Rport tag 0 is never placed on the free queue, so it
		 * is never handed out by bfa_rport_alloc().
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4941
/* Module detach hook - nothing to do for rports. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4946
/* Module start hook - nothing to do for rports. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4951
/* Module stop hook - nothing to do for rports. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4956
/*
 * IOC disable notification: return unused rport resources to the free
 * queue and post HWFAIL to every active rport's state machine.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL handling may unlink the rport */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4972
4973 static struct bfa_rport_s *
4974 bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4975 {
4976         struct bfa_rport_s *rport;
4977
4978         bfa_q_deq(&mod->rp_free_q, &rport);
4979         if (rport)
4980                 list_add_tail(&rport->qe, &mod->rp_active_q);
4981
4982         return rport;
4983 }
4984
4985 static void
4986 bfa_rport_free(struct bfa_rport_s *rport)
4987 {
4988         struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4989
4990         WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4991         list_del(&rport->qe);
4992         list_add_tail(&rport->qe, &mod->rp_free_q);
4993 }
4994
/*
 * Send an rport create request to firmware.  When the request queue is
 * full, arms a reqq-wait callback (which later posts QRESUME via
 * bfa_rport_qresume) and returns BFA_FALSE; otherwise queues the
 * message and returns BFA_TRUE.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz is the only multi-byte field needing byte-swap here */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
5027
/*
 * Send an rport delete request to firmware.  When the request queue is
 * full, arms a reqq-wait callback and returns BFA_FALSE; otherwise
 * queues the message and returns BFA_TRUE.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
5052
/*
 * Send an rport speed update to firmware.  Unlike create/delete, this
 * is best-effort: on a full request queue the update is simply dropped
 * (traced, no reqq wait is armed) and BFA_FALSE is returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
5078
5079
5080
5081 /*
5082  *  bfa_rport_public
5083  */
5084
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * to the owning rport's state machine or to driver callbacks.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* capture the f/w handle and qos attrs before FWRSP */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* state machine consumes the raw f/w message via event_arg */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
5141
5142 void
5143 bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5144 {
5145         struct bfa_rport_mod_s  *mod = BFA_RPORT_MOD(bfa);
5146         struct list_head        *qe;
5147         int     i;
5148
5149         for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5150                 bfa_q_deq_tail(&mod->rp_free_q, &qe);
5151                 list_add_tail(qe, &mod->rp_unused_q);
5152         }
5153 }
5154
5155 /*
5156  *  bfa_rport_api
5157  */
5158
5159 struct bfa_rport_s *
5160 bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5161 {
5162         struct bfa_rport_s *rp;
5163
5164         rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5165
5166         if (rp == NULL)
5167                 return NULL;
5168
5169         rp->bfa = bfa;
5170         rp->rport_drv = rport_drv;
5171         memset(&rp->stats, 0, sizeof(rp->stats));
5172
5173         WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5174         bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5175
5176         return rp;
5177 }
5178
/*
 * Bring an rport online.  rport_info is copied into the rport before
 * the ONLINE event is posted to its state machine.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* warn on a bad PDU size, then repair it below rather than fail */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5196
5197 void
5198 bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5199 {
5200         WARN_ON(speed == 0);
5201         WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5202
5203         if (rport) {
5204                 rport->rport_info.speed = speed;
5205                 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5206         }
5207 }
5208
/*
 * Set Rport LUN Mask: enable the lun-mask flag on both the logical
 * port (lps) and the rport, then propagate the wwn/tag pairing into
 * the FCP IM lun-mask table.
 */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* chained assignment: both the lps and the rport flag go TRUE */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5224
/*
 * Unset Rport LUN mask: clear the lun-mask flag on both the logical
 * port (lps) and the rport, and invalidate the tags in the FCP IM
 * lun-mask table.
 */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* chained assignment: both the lps and the rport flag go FALSE */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5240
5241 /*
5242  * SGPG related functions
5243  */
5244
/*
 * Compute DMA and KVA memory needed by the SGPG (scatter/gather page)
 * module: one bfi_sgpg_s (DMA) plus one bfa_sgpg_s (KVA) per page,
 * after clamping num_sgpgs to [BFA_SGPG_MIN, BFA_SGPG_MAX].
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* spread the sgpgs over DMA segments; last segment takes the rest */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5282
/*
 * Attach-time initialization of the SGPG module: carve the claimed DMA
 * segments into hardware SG pages (bfi_sgpg_s), build the parallel
 * host-side tracking array (bfa_sgpg_s) in KVA, and queue every page
 * on the module free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* union view of the PA so the LE-swapped value can be stored
	 * in the bfi_addr_u form the hardware expects */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment base up to an SG-page boundary */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* pages that fit in this segment after alignment */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		/* pair each DMA page with a host tracker; free-list them */
		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance the KVA cursor past the claimed tracker array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5345
/* BFA module hook: nothing to undo for SGPG at detach time */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
5350
/* BFA module hook: SGPG needs no start-time processing */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
5355
/* BFA module hook: SGPG needs no stop-time processing */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
5360
/* BFA module hook: no SGPG state to clean up on IOC disable */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
5365
5366 bfa_status_t
5367 bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5368 {
5369         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5370         struct bfa_sgpg_s *hsgpg;
5371         int i;
5372
5373         if (mod->free_sgpgs < nsgpgs)
5374                 return BFA_STATUS_ENOMEM;
5375
5376         for (i = 0; i < nsgpgs; i++) {
5377                 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5378                 WARN_ON(!hsgpg);
5379                 list_add_tail(&hsgpg->qe, sgpg_q);
5380         }
5381
5382         mod->free_sgpgs -= nsgpgs;
5383         return BFA_STATUS_OK;
5384 }
5385
/*
 * Return 'nsgpg' SG pages from 'sgpg_q' to the free pool, then use the
 * replenished pool to satisfy queued waiters in FIFO order.  A waiter
 * whose request becomes fully satisfied is dequeued and its completion
 * callback invoked.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the head waiter everything we have, up to its need;
		 * 'nsgpg' is reused here as the per-waiter grant count */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request fully satisfied; complete the waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5417
/*
 * Queue a wait element for 'nsgpg' SG pages.  Intended to be called
 * only after bfa_sgpg_malloc() failed, i.e. when the pool is known to
 * be short of the request; any pages currently free are handed to this
 * waiter immediately and the remainder is delivered later by
 * bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* callers must only wait when the pool cannot cover the request */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5443
5444 void
5445 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5446 {
5447         struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5448
5449         WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5450         list_del(&wqe->qe);
5451
5452         if (wqe->nsgpg_total != wqe->nsgpg)
5453                 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5454                                    wqe->nsgpg_total - wqe->nsgpg);
5455 }
5456
5457 void
5458 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5459                    void *cbarg)
5460 {
5461         INIT_LIST_HEAD(&wqe->sgpg_q);
5462         wqe->cbfn = cbfn;
5463         wqe->cbarg = cbarg;
5464 }
5465
5466 /*
5467  *  UF related functions
5468  */
5469 /*
5470  *****************************************************************************
5471  * Internal functions
5472  *****************************************************************************
5473  */
5474 static void
5475 __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5476 {
5477         struct bfa_uf_s   *uf = cbarg;
5478         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5479
5480         if (complete)
5481                 ufm->ufrecv(ufm->cbarg, uf);
5482 }
5483
5484 static void
5485 claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5486 {
5487         struct bfi_uf_buf_post_s *uf_bp_msg;
5488         u16 i;
5489         u16 buf_len;
5490
5491         ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5492         uf_bp_msg = ufm->uf_buf_posts;
5493
5494         for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5495              i++, uf_bp_msg++) {
5496                 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5497
5498                 uf_bp_msg->buf_tag = i;
5499                 buf_len = sizeof(struct bfa_uf_buf_s);
5500                 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5501                 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5502                             bfa_fn_lpu(ufm->bfa));
5503                 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5504         }
5505
5506         /*
5507          * advance pointer beyond consumed memory
5508          */
5509         bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5510 }
5511
5512 static void
5513 claim_ufs(struct bfa_uf_mod_s *ufm)
5514 {
5515         u16 i;
5516         struct bfa_uf_s   *uf;
5517
5518         /*
5519          * Claim block of memory for UF list
5520          */
5521         ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
5522
5523         /*
5524          * Initialize UFs and queue it in UF free queue
5525          */
5526         for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
5527                 memset(uf, 0, sizeof(struct bfa_uf_s));
5528                 uf->bfa = ufm->bfa;
5529                 uf->uf_tag = i;
5530                 uf->pb_len = BFA_PER_UF_DMA_SZ;
5531                 uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
5532                 uf->buf_pa = ufm_pbs_pa(ufm, i);
5533                 list_add_tail(&uf->qe, &ufm->uf_free_q);
5534         }
5535
5536         /*
5537          * advance memory pointer
5538          */
5539         bfa_mem_kva_curp(ufm) = (u8 *) uf;
5540 }
5541
/* Claim all KVA the UF module needs: trackers, then prebuilt post msgs */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5548
5549 static void
5550 bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5551                 struct bfa_s *bfa)
5552 {
5553         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5554         struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5555         u32     num_ufs = cfg->fwcfg.num_uf_bufs;
5556         struct bfa_mem_dma_s *seg_ptr;
5557         u16     nsegs, idx, per_seg_uf = 0;
5558
5559         nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5560         per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
5561
5562         bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5563                 if (num_ufs >= per_seg_uf) {
5564                         num_ufs -= per_seg_uf;
5565                         bfa_mem_dma_setup(minfo, seg_ptr,
5566                                 per_seg_uf * BFA_PER_UF_DMA_SZ);
5567                 } else
5568                         bfa_mem_dma_setup(minfo, seg_ptr,
5569                                 num_ufs * BFA_PER_UF_DMA_SZ);
5570         }
5571
5572         /* kva memory */
5573         bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5574                 (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
5575 }
5576
5577 static void
5578 bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5579                 struct bfa_pcidev_s *pcidev)
5580 {
5581         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5582
5583         ufm->bfa = bfa;
5584         ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5585         INIT_LIST_HEAD(&ufm->uf_free_q);
5586         INIT_LIST_HEAD(&ufm->uf_posted_q);
5587         INIT_LIST_HEAD(&ufm->uf_unused_q);
5588
5589         uf_mem_claim(ufm);
5590 }
5591
/* BFA module hook: nothing to undo for UF at detach time */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5596
5597 static struct bfa_uf_s *
5598 bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5599 {
5600         struct bfa_uf_s   *uf;
5601
5602         bfa_q_deq(&uf_mod->uf_free_q, &uf);
5603         return uf;
5604 }
5605
/* Return a UF to the free queue */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5611
/*
 * Post one UF buffer to firmware by copying its prebuilt BUF_POST
 * message into the FCXP request queue.  On success the UF is moved to
 * the posted queue; on BFA_STATUS_FAILED (request queue full) the UF
 * is left on no queue and the caller retains ownership.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* the message was fully built at attach time; just copy it in */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5630
5631 static void
5632 bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5633 {
5634         struct bfa_uf_s   *uf;
5635
5636         while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5637                 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5638                         break;
5639         }
5640 }
5641
5642 static void
5643 uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5644 {
5645         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5646         u16 uf_tag = m->buf_tag;
5647         struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
5648         struct bfa_uf_buf_s *uf_buf;
5649         uint8_t *buf;
5650         struct fchs_s *fchs;
5651
5652         uf_buf = (struct bfa_uf_buf_s *)
5653                         bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5654         buf = &uf_buf->d[0];
5655
5656         m->frm_len = be16_to_cpu(m->frm_len);
5657         m->xfr_len = be16_to_cpu(m->xfr_len);
5658
5659         fchs = (struct fchs_s *)uf_buf;
5660
5661         list_del(&uf->qe);      /* dequeue from posted queue */
5662
5663         uf->data_ptr = buf;
5664         uf->data_len = m->xfr_len;
5665
5666         WARN_ON(uf->data_len < sizeof(struct fchs_s));
5667
5668         if (uf->data_len == sizeof(struct fchs_s)) {
5669                 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5670                                uf->data_len, (struct fchs_s *)buf);
5671         } else {
5672                 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5673                 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5674                                       BFA_PL_EID_RX, uf->data_len,
5675                                       (struct fchs_s *)buf, pld_w0);
5676         }
5677
5678         if (bfa->fcs)
5679                 __bfa_cb_uf_recv(uf, BFA_TRUE);
5680         else
5681                 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5682 }
5683
/* BFA module hook: UF needs no stop-time processing */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5688
/*
 * IOC disable: reclaim every UF back onto the free queue — both the
 * unused pool and any buffers still posted to the (now dead) firmware.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	/* posted buffers will never complete; pull them back */
	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
5705
/* BFA module hook: hand all free UF buffers to firmware at start */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5711
5712 /*
5713  * Register handler for all unsolicted receive frames.
5714  *
5715  * @param[in]   bfa             BFA instance
5716  * @param[in]   ufrecv  receive handler function
5717  * @param[in]   cbarg   receive handler arg
5718  */
5719 void
5720 bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5721 {
5722         struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5723
5724         ufm->ufrecv = ufrecv;
5725         ufm->cbarg = cbarg;
5726 }
5727
5728 /*
5729  *      Free an unsolicited frame back to BFA.
5730  *
5731  * @param[in]           uf              unsolicited frame to be freed
5732  *
5733  * @return None
5734  */
5735 void
5736 bfa_uf_free(struct bfa_uf_s *uf)
5737 {
5738         bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5739         bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5740 }
5741
5742
5743
5744 /*
5745  *  uf_pub BFA uf module public functions
5746  */
5747 void
5748 bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5749 {
5750         bfa_trc(bfa, msg->mhdr.msg_id);
5751
5752         switch (msg->mhdr.msg_id) {
5753         case BFI_UF_I2H_FRM_RCVD:
5754                 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5755                 break;
5756
5757         default:
5758                 bfa_trc(bfa, msg->mhdr.msg_id);
5759                 WARN_ON(1);
5760         }
5761 }
5762
5763 void
5764 bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5765 {
5766         struct bfa_uf_mod_s     *mod = BFA_UF_MOD(bfa);
5767         struct list_head        *qe;
5768         int     i;
5769
5770         for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5771                 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5772                 list_add_tail(qe, &mod->uf_unused_q);
5773         }
5774 }
5775
5776 /*
5777  *      Dport forward declaration
5778  */
5779
/* D-port diagnostic test states reported to users */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not start dport is enabled */
};
5787
5788 /*
5789  * BFA DPORT state machine events
5790  */
/* Events driving the dport state machine (see bfa_dport_sm_* below) */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
	BFA_DPORT_SM_DISABLE	= 2,	/* dport disable event        */
	BFA_DPORT_SM_FWRSP	= 3,	/* fw enable/disable rsp      */
	BFA_DPORT_SM_QRESUME	= 4,	/* CQ space available         */
	BFA_DPORT_SM_HWFAIL	= 5,	/* IOC h/w failure            */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
	BFA_DPORT_SM_SCN	= 8,	/* state change notify frm fw */
};
5801
5802 static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5803                                   enum bfa_dport_sm_event event);
5804 static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5805                                   enum bfa_dport_sm_event event);
5806 static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5807                                   enum bfa_dport_sm_event event);
5808 static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5809                                  enum bfa_dport_sm_event event);
5810 static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5811                                  enum bfa_dport_sm_event event);
5812 static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5813                                    enum bfa_dport_sm_event event);
5814 static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5815                                         enum bfa_dport_sm_event event);
5816 static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5817                                   enum bfa_dport_sm_event event);
5818 static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5819                                    enum bfa_dport_sm_event event);
5820 static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5821                                    enum bfa_dport_sm_event event);
5822 static void bfa_dport_qresume(void *cbarg);
5823 static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5824                                 struct bfi_diag_dport_rsp_s *msg);
5825 static void bfa_dport_scn(struct bfa_dport_s *dport,
5826                                 struct bfi_diag_dport_scn_s *msg);
5827
5828 /*
5829  *      BFA fcdiag module
5830  */
5831 #define BFA_DIAG_QTEST_TOV      1000    /* msec */
5832
5833 /*
5834  *      Set port status to busy
5835  */
5836 static void
5837 bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5838 {
5839         struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5840
5841         if (fcdiag->lb.lock)
5842                 fcport->diag_busy = BFA_TRUE;
5843         else
5844                 fcport->diag_busy = BFA_FALSE;
5845 }
5846
/* BFA module hook: fcdiag claims no memory of its own */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}
5852
5853 static void
5854 bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5855                 struct bfa_pcidev_s *pcidev)
5856 {
5857         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5858         struct bfa_dport_s  *dport = &fcdiag->dport;
5859
5860         fcdiag->bfa             = bfa;
5861         fcdiag->trcmod  = bfa->trcmod;
5862         /* The common DIAG attach bfa_diag_attach() will do all memory claim */
5863         dport->bfa = bfa;
5864         bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5865         bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5866         dport->cbfn = NULL;
5867         dport->cbarg = NULL;
5868         dport->test_state = BFA_DPORT_ST_DISABLED;
5869         memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
5870 }
5871
/*
 * IOC failure: fail any loopback test in progress with
 * BFA_STATUS_IOC_FAILURE and forward the h/w failure event to the
 * dport state machine.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		/* complete the waiting caller and release the lock */
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
5888
/* BFA module hook: nothing to undo for fcdiag at detach time */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
5893
/* BFA module hook: fcdiag needs no start-time processing */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
5898
/* BFA module hook: fcdiag needs no stop-time processing */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
5903
/*
 * Queue-test timer expiry: the firmware response never arrived.
 * Report BFA_STATUS_ETIMER with the number of completed iterations,
 * complete the waiting caller and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	/* count counts down from QTEST_CNT_DEFAULT; report completed */
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5925
5926 static bfa_status_t
5927 bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5928 {
5929         u32     i;
5930         struct bfi_diag_qtest_req_s *req;
5931
5932         req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5933         if (!req)
5934                 return BFA_STATUS_DEVBUSY;
5935
5936         /* build host command */
5937         bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5938                 bfa_fn_lpu(fcdiag->bfa));
5939
5940         for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5941                 req->data[i] = QTEST_PAT_DEFAULT;
5942
5943         bfa_trc(fcdiag, fcdiag->qtest.queue);
5944         /* ring door bell */
5945         bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5946         return BFA_STATUS_OK;
5947 }
5948
/*
 * Queue-test response handler.  Verifies the echoed payload (expected
 * to be the bitwise complement of the sent pattern), then either sends
 * the next iteration, advances to the next queue (in test-all mode),
 * or finishes the test: stop the guard timer, fill in the result and
 * complete the waiting caller.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		/* response arrived after timeout already completed caller */
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: fw returns the complement of the sent pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations remain on this queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* test-all mode: restart the count on the next CQ */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
6006
/*
 * Loopback response from firmware: byte-swap the frame counters into
 * the caller's result structure, complete the waiting caller and
 * release the loopback lock / port diag-busy status.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
6025
6026 static bfa_status_t
6027 bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
6028                         struct bfa_diag_loopback_s *loopback)
6029 {
6030         struct bfi_diag_lb_req_s *lb_req;
6031
6032         lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
6033         if (!lb_req)
6034                 return BFA_STATUS_DEVBUSY;
6035
6036         /* build host command */
6037         bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
6038                 bfa_fn_lpu(fcdiag->bfa));
6039
6040         lb_req->lb_mode = loopback->lb_mode;
6041         lb_req->speed = loopback->speed;
6042         lb_req->loopcnt = loopback->loopcnt;
6043         lb_req->pattern = loopback->pattern;
6044
6045         /* ring door bell */
6046         bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
6047
6048         bfa_trc(fcdiag, loopback->lb_mode);
6049         bfa_trc(fcdiag, loopback->speed);
6050         bfa_trc(fcdiag, loopback->loopcnt);
6051         bfa_trc(fcdiag, loopback->pattern);
6052         return BFA_STATUS_OK;
6053 }
6054
6055 /*
6056  *      cpe/rme intr handler
6057  */
6058 void
6059 bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
6060 {
6061         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6062
6063         switch (msg->mhdr.msg_id) {
6064         case BFI_DIAG_I2H_LOOPBACK:
6065                 bfa_fcdiag_loopback_comp(fcdiag,
6066                                 (struct bfi_diag_lb_rsp_s *) msg);
6067                 break;
6068         case BFI_DIAG_I2H_QTEST:
6069                 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
6070                 break;
6071         case BFI_DIAG_I2H_DPORT:
6072                 bfa_dport_req_comp(&fcdiag->dport,
6073                                 (struct bfi_diag_dport_rsp_s *)msg);
6074                 break;
6075         case BFI_DIAG_I2H_DPORT_SCN:
6076                 bfa_dport_scn(&fcdiag->dport,
6077                                 (struct bfi_diag_dport_scn_s *)msg);
6078                 break;
6079         default:
6080                 bfa_trc(fcdiag, msg->mhdr.msg_id);
6081                 WARN_ON(1);
6082         }
6083 }
6084
6085 /*
6086  *      Loopback test
6087  *
6088  *   @param[in] *bfa            - bfa data struct
6089  *   @param[in] opmode          - port operation mode
6090  *   @param[in] speed           - port speed
6091  *   @param[in] lpcnt           - loop count
6092  *   @param[in] pat                     - pattern to build packet
6093  *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
6094  *   @param[in] cbfn            - callback function
6095  *   @param[in] cbarg           - callback functioin arg
6096  *
6097  *   @param[out]
6098  */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct	bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	/* loopback requires an operational IOC */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/* the port must be administratively disabled before loopback */
	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		/* requested speed must not exceed the port's capability */
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* FCoE ports only run at 10G */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}
	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}
	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* all checks passed: take the lock and kick off the test */
	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
6198
6199 /*
6200  *      DIAG queue test command
6201  *
6202  *   @param[in] *bfa            - bfa data struct
6203  *   @param[in] force           - 1: don't do ioc op checking
6204  *   @param[in] queue           - queue no. to test
6205  *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6206  *   @param[in] cbfn            - callback function
6207  *   @param[in] *cbarg          - callback function arg
6208  *
6209  *   @param[out]
6210  */
6211 bfa_status_t
6212 bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
6213                 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
6214                 void *cbarg)
6215 {
6216         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6217         bfa_status_t status;
6218         bfa_trc(fcdiag, force);
6219         bfa_trc(fcdiag, queue);
6220
6221         if (!force && !bfa_iocfc_is_operational(bfa))
6222                 return BFA_STATUS_IOC_NON_OP;
6223
6224         /* check to see if there is another destructive diag cmd running */
6225         if (fcdiag->qtest.lock) {
6226                 bfa_trc(fcdiag, fcdiag->qtest.lock);
6227                 return BFA_STATUS_DEVBUSY;
6228         }
6229
6230         /* Initialization */
6231         fcdiag->qtest.lock = 1;
6232         fcdiag->qtest.cbfn = cbfn;
6233         fcdiag->qtest.cbarg = cbarg;
6234         fcdiag->qtest.result = result;
6235         fcdiag->qtest.count = QTEST_CNT_DEFAULT;
6236
6237         /* Init test results */
6238         fcdiag->qtest.result->status = BFA_STATUS_OK;
6239         fcdiag->qtest.result->count  = 0;
6240
6241         /* send */
6242         if (queue < BFI_IOC_MAX_CQS) {
6243                 fcdiag->qtest.result->queue  = (u8)queue;
6244                 fcdiag->qtest.queue = (u8)queue;
6245                 fcdiag->qtest.all   = 0;
6246         } else {
6247                 fcdiag->qtest.result->queue  = 0;
6248                 fcdiag->qtest.queue = 0;
6249                 fcdiag->qtest.all   = 1;
6250         }
6251         status = bfa_fcdiag_queuetest_send(fcdiag);
6252
6253         /* Start a timer */
6254         if (status == BFA_STATUS_OK) {
6255                 bfa_timer_start(bfa, &fcdiag->qtest.timer,
6256                                 bfa_fcdiag_queuetest_timeout, fcdiag,
6257                                 BFA_DIAG_QTEST_TOV);
6258                 fcdiag->qtest.timer_active = 1;
6259         }
6260         return status;
6261 }
6262
6263 /*
6264  * DIAG PLB is running
6265  *
6266  *   @param[in] *bfa    - bfa data struct
6267  *
6268  *   @param[out]
6269  */
6270 bfa_status_t
6271 bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6272 {
6273         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6274         return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6275 }
6276
6277 /*
6278  *      D-port
6279  */
/*
 * Seed (__dport)->result for a test run that is about to begin:
 * timestamp the start, mark the run in-progress, and latch the remote
 * port WWNs and loop count currently held on the dport.
 */
#define bfa_dport_result_start(__dport, __mode) do {                    \
		(__dport)->result.start_time = bfa_get_log_time();      \
		(__dport)->result.status = DPORT_TEST_ST_INPRG;         \
		(__dport)->result.mode = (__mode);                      \
		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;         \
		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;         \
		(__dport)->result.lpcnt = (__dport)->lpcnt;             \
} while (0)
6288
6289 static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
6290                                         enum bfi_dport_req req);
6291 static void
6292 bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6293 {
6294         if (dport->cbfn != NULL) {
6295                 dport->cbfn(dport->cbarg, bfa_status);
6296                 dport->cbfn = NULL;
6297                 dport->cbarg = NULL;
6298         }
6299 }
6300
/*
 * Dport disabled state: the initial state, and where every disable path
 * ends up.  Accepts an explicit enable, or a firmware SCN announcing a
 * dynamic dport enable.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		/* if the request queue is full, park in qwait until resume */
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			/* firmware-driven (dynamic) enable, no host request */
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			/* no other SCN is expected while disabled */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6339
/*
 * Waiting for request-queue space to send BFI_DPORT_ENABLE to firmware.
 */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* queue space is back; send the deferred enable request */
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* hardware failure: cancel the queue wait, fail the caller */
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6362
/*
 * BFI_DPORT_ENABLE has been sent; waiting for the firmware response.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			/* record an in-progress result in AUTO opmode */
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		/* firmware rejected the enable; undo the fcport side */
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6396
/*
 * Dport enabled: a test may be (re)started or the dport disabled.
 * Firmware SCNs update test_state while we sit in this state.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		/* SCNs here mostly track test progress, not a state change */
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* firmware-driven disable of a dynamic dport */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6468
/*
 * Waiting for request-queue space to send BFI_DPORT_DISABLE to firmware.
 */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			     enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* hardware failed anyway, so the disable is complete */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6495
/*
 * BFI_DPORT_DISABLE has been sent; waiting for the firmware response.
 */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* hardware failed anyway, so the disable is complete */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* no state change */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6520
/*
 * Waiting for request-queue space to send BFI_DPORT_START to firmware.
 */
static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_starting);
		bfa_dport_send_req(dport, BFI_DPORT_START);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* hardware failure: cancel the queue wait, fail the caller */
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6543
/*
 * BFI_DPORT_START has been sent; waiting for the firmware response.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			/* record an in-progress result in MANUAL opmode */
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru */
		/* success and REQFAIL both return to the enabled state */

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6574
/*
 * BFI_DPORT_DYN_DISABLE has been sent for a dynamic dport; waiting for
 * the firmware SCN that confirms the dynamic dport is gone.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			/* dynamic dport torn down; bring the fcport back up */
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6606
/*
 * Waiting for request-queue space to send BFI_DPORT_DYN_DISABLE.
 */
static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* hardware failed anyway, so the disable is complete */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6633
6634 static bfa_boolean_t
6635 bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6636 {
6637         struct bfi_diag_dport_req_s *m;
6638
6639         /*
6640          * check for room in queue to send request now
6641          */
6642         m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6643         if (!m) {
6644                 bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6645                 return BFA_FALSE;
6646         }
6647
6648         bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6649                     bfa_fn_lpu(dport->bfa));
6650         m->req  = req;
6651         if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6652                 m->lpcnt = cpu_to_be32(dport->lpcnt);
6653                 m->payload = cpu_to_be32(dport->payload);
6654         }
6655
6656         /*
6657          * queue I/O message to firmware
6658          */
6659         bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6660
6661         return BFA_TRUE;
6662 }
6663
6664 static void
6665 bfa_dport_qresume(void *cbarg)
6666 {
6667         struct bfa_dport_s *dport = cbarg;
6668
6669         bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6670 }
6671
6672 static void
6673 bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
6674 {
6675         msg->status = cpu_to_be32(msg->status);
6676         dport->i2hmsg.rsp.status = msg->status;
6677         dport->rp_pwwn = msg->pwwn;
6678         dport->rp_nwwn = msg->nwwn;
6679
6680         if ((msg->status == BFA_STATUS_OK) ||
6681             (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
6682                 bfa_trc(dport->bfa, msg->status);
6683                 bfa_trc(dport->bfa, dport->rp_pwwn);
6684                 bfa_trc(dport->bfa, dport->rp_nwwn);
6685                 bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6686
6687         } else {
6688                 bfa_trc(dport->bfa, msg->status);
6689                 bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
6690         }
6691         bfa_cb_fcdiag_dport(dport, msg->status);
6692 }
6693
6694 static bfa_boolean_t
6695 bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6696 {
6697         if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)      ||
6698             bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6699             bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)     ||
6700             bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6701             bfa_sm_cmp_state(dport, bfa_dport_sm_starting)      ||
6702             bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6703                 return BFA_TRUE;
6704         } else {
6705                 return BFA_FALSE;
6706         }
6707 }
6708
/*
 * Handle a dport state-change notification (SCN) from firmware:
 * capture per-test result data, then forward the event to the
 * dport state machine.
 */
static void
bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
{
	int i;
	uint8_t subtesttype;

	bfa_trc(dport->bfa, msg->state);
	dport->i2hmsg.scn.state = msg->state;

	switch (dport->i2hmsg.scn.state) {
	case BFI_DPORT_SCN_TESTCOMP:
		/* test finished: snapshot the completion data */
		dport->result.end_time = bfa_get_log_time();
		bfa_trc(dport->bfa, dport->result.end_time);

		dport->result.status = msg->info.testcomp.status;
		bfa_trc(dport->bfa, dport->result.status);

		/* NOTE(review): cpu_to_be32 applied to i2h fields here and
		 * below — correct only because the 32-bit swap is symmetric;
		 * be32_to_cpu would state the intent. Confirm before change. */
		dport->result.roundtrip_latency =
			cpu_to_be32(msg->info.testcomp.latency);
		dport->result.est_cable_distance =
			cpu_to_be32(msg->info.testcomp.distance);
		dport->result.buffer_required =
			be16_to_cpu(msg->info.testcomp.numbuffer);

		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
		dport->result.speed = msg->info.testcomp.speed;

		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
		bfa_trc(dport->bfa, dport->result.est_cable_distance);
		bfa_trc(dport->bfa, dport->result.buffer_required);
		bfa_trc(dport->bfa, dport->result.frmsz);
		bfa_trc(dport->bfa, dport->result.speed);

		/* per-subtest completion status */
		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
			dport->result.subtest[i].status =
				msg->info.testcomp.subtest_status[i];
			bfa_trc(dport->bfa, dport->result.subtest[i].status);
		}
		break;

	case BFI_DPORT_SCN_TESTSKIP:
	case BFI_DPORT_SCN_DDPORT_ENABLE:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		break;

	case BFI_DPORT_SCN_TESTSTART:
		/* new test: reset results, latch remote port identity */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		dport->rp_pwwn = msg->info.teststart.pwwn;
		dport->rp_nwwn = msg->info.teststart.nwwn;
		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
		bfa_dport_result_start(dport, msg->info.teststart.mode);
		break;

	case BFI_DPORT_SCN_SUBTESTSTART:
		subtesttype = msg->info.teststart.type;
		dport->result.subtest[subtesttype].start_time =
			bfa_get_log_time();
		dport->result.subtest[subtesttype].status =
			DPORT_TEST_ST_INPRG;

		bfa_trc(dport->bfa, subtesttype);
		bfa_trc(dport->bfa,
			dport->result.subtest[subtesttype].start_time);
		break;

	case BFI_DPORT_SCN_SFP_REMOVED:
	case BFI_DPORT_SCN_DDPORT_DISABLED:
	case BFI_DPORT_SCN_DDPORT_DISABLE:
	case BFI_DPORT_SCN_FCPORT_DISABLE:
		dport->result.status = DPORT_TEST_ST_IDLE;
		break;

	default:
		bfa_sm_fault(dport->bfa, msg->state);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
}
6789
/*
 * Dport enable
 *
 *   @param[in] *bfa	- bfa data struct
 *   @param[in] lpcnt	- loop count (0 selects DPORT_ENABLE_LOOPCNT_DEFAULT)
 *   @param[in] pat	- test pattern (0 selects LB_PATTERN_DEFAULT)
 *   @param[in] cbfn	- completion callback
 *   @param[in] *cbarg	- completion callback argument
 *
 * Validates the adapter/port configuration (the order of the checks
 * below determines which error status wins), then kicks the dport
 * state machine with BFA_DPORT_SM_ENABLE.
 */
bfa_status_t
bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
				bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s  *dport = &fcdiag->dport;

	/*
	 * Dport is not supported on MEZZ cards
	 */
	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
	}

	/*
	 * Dport is supported in CT2 or above
	 */
	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	}

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(dport->bfa, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	/*
	 * Check if port mode is FC port
	 */
	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
		return BFA_STATUS_CMD_NOTSUPP_CNA;
	}

	/*
	 * Check if port is in LOOP mode (either configured or active)
	 */
	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_TOPOLOGY_LOOP;
	}

	/*
	 * Check if port is TRUNK mode
	 */
	if (bfa_fcport_is_trunk_enabled(bfa)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_ERROR_TRUNK_ENABLED;
	}

	/*
	 * Check if diag loopback is running
	 */
	if (bfa_fcdiag_lb_is_running(bfa)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DIAG_BUSY;
	}

	/*
	 * Check to see if port is disabled or in dport state
	 */
	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if dport is in dynamic mode
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is already enabled
	 */
	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_ENABLED;
	}

	bfa_trc(dport->bfa, lpcnt);
	bfa_trc(dport->bfa, pat);
	/* zero arguments fall back to the driver defaults */
	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
6902
6903 /*
6904  *      Dport disable
6905  *
6906  *      @param[in] *bfa            - bfa data struct
6907  */
6908 bfa_status_t
6909 bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6910 {
6911         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6912         struct bfa_dport_s *dport = &fcdiag->dport;
6913
6914         if (bfa_ioc_is_disabled(&bfa->ioc))
6915                 return BFA_STATUS_IOC_DISABLED;
6916
6917         /* if port is PBC disabled, return error */
6918         if (bfa_fcport_is_pbcdisabled(bfa)) {
6919                 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6920                 return BFA_STATUS_PBC;
6921         }
6922
6923         /*
6924          * Check if dport is in dynamic mode
6925          */
6926         if (dport->dynamic) {
6927                 return BFA_STATUS_DDPORT_ERR;
6928         }
6929
6930         /*
6931          * Check to see if port is disable or in dport state
6932          */
6933         if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6934             (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6935                 bfa_trc(dport->bfa, 0);
6936                 return BFA_STATUS_PORT_NOT_DISABLED;
6937         }
6938
6939         /*
6940          * Check if dport is busy
6941          */
6942         if (bfa_dport_is_sending_req(dport))
6943                 return BFA_STATUS_DEVBUSY;
6944
6945         /*
6946          * Check if dport is already disabled
6947          */
6948         if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6949                 bfa_trc(dport->bfa, 0);
6950                 return BFA_STATUS_DPORT_DISABLED;
6951         }
6952
6953         dport->cbfn = cbfn;
6954         dport->cbarg = cbarg;
6955
6956         bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6957         return BFA_STATUS_OK;
6958 }
6959
/*
 * Dport start -- restart dport test
 *
 *   @param[in] *bfa	- bfa data struct
 *   @param[in] lpcnt	- loop count (0 selects DPORT_ENABLE_LOOPCNT_DEFAULT)
 *   @param[in] pat	- test pattern (0 selects LB_PATTERN_DEFAULT)
 *   @param[in] cbfn	- completion callback
 *   @param[in] *cbarg	- completion callback argument
 */
bfa_status_t
bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
			bfa_cb_diag_t cbfn, void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	/*
	 * Check to see if IOC is down
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * Check if dport is in dynamic mode
	 */
	if (dport->dynamic)
		return BFA_STATUS_DDPORT_ERR;

	/*
	 * Check if dport is busy
	 */
	if (bfa_dport_is_sending_req(dport))
		return BFA_STATUS_DEVBUSY;

	/*
	 * Check if dport is in enabled state.
	 * Test can only be restarted when the previous test has completed
	 */
	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
		bfa_trc(dport->bfa, 0);
		return BFA_STATUS_DPORT_DISABLED;

	} else {
		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
			return BFA_STATUS_DPORT_INV_SFP;

		if (dport->test_state == BFA_DPORT_ST_INP)
			return BFA_STATUS_DEVBUSY;

		/* at this point the only legal state is "completed" */
		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
	}

	bfa_trc(dport->bfa, lpcnt);
	bfa_trc(dport->bfa, pat);

	/* zero arguments fall back to the driver defaults */
	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;

	dport->cbfn = cbfn;
	dport->cbarg = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
	return BFA_STATUS_OK;
}
7020
7021 /*
7022  * Dport show -- return dport test result
7023  *
7024  *   @param[in] *bfa            - bfa data struct
7025  */
7026 bfa_status_t
7027 bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
7028 {
7029         struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
7030         struct bfa_dport_s *dport = &fcdiag->dport;
7031
7032         /*
7033          * Check to see if IOC is down
7034          */
7035         if (!bfa_iocfc_is_operational(bfa))
7036                 return BFA_STATUS_IOC_NON_OP;
7037
7038         /*
7039          * Check if dport is busy
7040          */
7041         if (bfa_dport_is_sending_req(dport))
7042                 return BFA_STATUS_DEVBUSY;
7043
7044         /*
7045          * Check if dport is in enabled state.
7046          */
7047         if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
7048                 bfa_trc(dport->bfa, 0);
7049                 return BFA_STATUS_DPORT_DISABLED;
7050
7051         }
7052
7053         /*
7054          * Check if there is SFP
7055          */
7056         if (dport->test_state == BFA_DPORT_ST_NO_SFP)
7057                 return BFA_STATUS_DPORT_INV_SFP;
7058
7059         memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
7060
7061         return BFA_STATUS_OK;
7062 }