/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
/* IOC local definitions */

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
	((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
	((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
	((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)	\
	((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
	((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)	\
	((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
	((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)			\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) ||	\
	 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
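/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * bfa_ioc_state_disabled() expands its argument several times, so read
 * the fwstate once into a local before testing it:
 */
static bool bfa_ioc_fw_is_down(struct bfa_ioc *ioc)
{
	/* one register read, then a pure macro test on the local */
	enum bfi_ioc_state fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	return bfa_ioc_state_disabled(fwstate);
}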
static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET = 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE = 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE = 3,	/*!< IOC disable request	*/
	IOC_E_DETACH = 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED = 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
	IOC_E_DISABLED = 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED = 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL = 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR = 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT = 11,	/*!< timeout			*/
	IOC_E_HWFAILED = 12,	/*!< PCI mapping failure notice	*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
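/*
 * Illustrative sketch (assumes the bfa_sm_to_state() helper and bfa_sm_t
 * typedef from bfa_cs.h): the table above maps a state-handler pointer
 * back to the externally visible BFA_IOC_* state, e.g. for queries:
 */
static enum bfa_ioc_state bfa_ioc_cur_state(struct bfa_ioc *ioc)
{
	return (enum bfa_ioc_state)
		bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
}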
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE = 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE = 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP = 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY = 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response	*/
	IOCPF_E_FAIL = 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL = 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
	IOCPF_E_SEMLOCKED = 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT = 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR = 12,	/*!< h/w sem mapping error	*/
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET = 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT = 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT = 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY = 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL = 5,	/*!< IOCPF failed */
	BFA_IOCPF_FAIL = 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED = 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
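/*
 * Illustrative sketch (hypothetical test hook, not part of the driver):
 * both state machines are driven the same way -- install a handler with
 * bfa_fsm_set_state() and deliver events with bfa_fsm_send_event():
 */
static void bfa_iocpf_force_sem_error(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
}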
/* IOC State Machine */

/* Beginning state. IOC uninit state. */
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)

/* IOC is in uninit state. */
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);

/* Reset entry actions -- initialize state machine */
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);

/* IOC is in reset state. */
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);

		bfa_ioc_disable_comp(ioc);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
	bfa_iocpf_enable(ioc);

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		/* !!! fall through !!! */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);

		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
/* Fetch IOC attributes from firmware; the getattr timer is started. */
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
/* IOC configuration in progress. Timer is active. */
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);

		del_timer(&ioc->ioc_timer);
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);

		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);

bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
	bfa_iocpf_disable(ioc);

/* IOC is being disabled */
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);

		/*
		 * No state change. Moves to disabled state only after
		 * the iocpf sm completes failure processing and enters
		 * the disabled state itself.
		 */

		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
/* IOC disable completion entry. */
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
	bfa_ioc_disable_comp(ioc);

bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);

		ioc->cbfn->disable_cbfn(ioc->bfa);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)

/* Hardware initialization retry. */
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);

		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)

bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		/* HB failure notification, ignore. */

bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)

bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		ioc->cbfn->disable_cbfn(ioc->bfa);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;

/* Beginning state. IOC is in reset state. */
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
/* Semaphore should be acquired for version check. */
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);

/* Awaiting h/w semaphore to continue with version check. */
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);

		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
/* Notify enable completion callback */
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
/* Awaiting firmware version match. */
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);

		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
/* Request for semaphore. */
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);

/* Awaiting semaphore for h/w initialization. */
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/*
	 * Enable interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
	bfa_ioc_pf_enabled(iocpf->ioc);

bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);

/* IOC is being disabled */
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

	case IOCPF_E_FWRSP_ENABLE:
		break;
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);

/* IOC hb ack request is being removed. */
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
/* IOC disable completion entry. */
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);

bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);

/* Hardware initialization failed. */
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)

/* Hardware initialization failed. */
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

	default:
		bfa_sm_fault(event);
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);

/* IOC is in failed state. */
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
	struct bfa_ioc *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);

	default:
		bfa_sm_fault(event);
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)

/* IOC is in failed state. */
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

	default:
		bfa_sm_fault(event);
/* BFA IOC private functions */

/* Notify common modules registered for notification. */
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}

bfa_ioc_disable_comp(struct bfa_ioc *ioc)
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	return !(r32 & 1);

bfa_nw_ioc_sem_release(void __iomem *sem_reg)
	writel(1, sem_reg);
/* Clear fwver hdr */
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
	u32 r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));

bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
	writel(1, ioc->ioc_regs.ioc_sem_reg);
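/*
 * Illustrative sketch (hypothetical helper, assumes <linux/delay.h>):
 * the h/w semaphore protocol is "read to try-lock, write 1 to unlock";
 * a bounded busy-wait acquire mirrors bfa_nw_ioc_sem_get() above:
 */
static bool bfa_ioc_sem_trylock_spin(struct bfa_ioc *ioc, int spins)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_sem_reg);

	/* the acquiring read returns 0; 1 means somebody else holds it */
	while ((r32 & 1) && --spins > 0) {
		udelay(2);
		r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	}

	return !(r32 & 1);
}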
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
	del_timer(&ioc->sem_timer);
/* Initialize LPU local memory (aka secondary memory / SRAM) */
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/* i2c workaround 12.5khz clock */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/* wait for memory initialization to be complete */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/* If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
/* Get driver and firmware versions. */
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
	u32 pgnum;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;

/* Returns TRUE if major, minor and maintenance versions are the same.
 * If the patch versions are also equal, the MD5 checksums must match.
 */
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return false;
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;

fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return true;

	return false;
/* Tells whether fwhdr_to_cmp is better, older or the same as base_fwhdr,
 * provided the two images are compatible.
 */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == false)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major, minor, maint and patch numbers are the same.
	 */
	if (fwhdr_is_ga(base_fwhdr) == true)
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	else
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal; the MD5 check is done as part of
	 * the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
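/*
 * Worked example (hypothetical version numbers): with a base image
 * 3.2.25.1 phase 1 build 7 (an internal build), a GA image of the same
 * patch (phase 0, build 0) compares as BFI_IOC_IMG_VER_BETTER; with a
 * GA base, a second GA of the same patch is BFI_IOC_IMG_VER_SAME and
 * any internal build of that patch is BFI_IOC_IMG_VER_OLD.
 */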
/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147
enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};
/* flash command register data structure */
union bfa_flash_cmd_reg {

/* flash device status register data structure */
union bfa_flash_dev_status_reg {

/* flash address register data structure */
union bfa_flash_addr_reg {
/* Flash raw private functions */
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));

bfa_flash_cmd_act_check(void __iomem *pci_bar)
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
/* Flush FLI data fifo. */
bfa_flash_fifo_flush(void __iomem *pci_bar)
	u32 i;
	u32 t;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		t = readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
/* Read flash status. */
bfa_flash_status_read(void __iomem *pci_bar)
	union bfa_flash_dev_status_reg dev_status;
	enum bfa_flash_err status;
	u32 ret_status;
	int i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);

	return ret_status;
/* Start flash read operation. */
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
	enum bfa_flash_err status;

	/* len must be multiple of 4 and not exceeding fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
/* Check flash read operation. */
bfa_flash_read_check(void __iomem *pci_bar)
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;

/* End flash read operation. */
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);

		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
/* Perform flash raw read. */

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

bfa_raw_sem_get(void __iomem *bar)
	int locked;

	locked = readl((bar + FLASH_SEM_LOCK_REG));

	return !locked;

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;

bfa_flash_sem_put(void __iomem *bar)
	writel(0, (bar + FLASH_SEM_LOCK_REG));
static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
	u32 n, status;
	u32 off, l, s, residue, fifo_sz;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
				&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
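/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * reading a f/w image header through the raw interface; offset and
 * length must be 4-byte multiples:
 */
static enum bfa_status
bfa_flash_read_hdr_example(void __iomem *pci_bar,
			   struct bfi_ioc_image_hdr *hdr)
{
	/* flash offset 0; semaphore handling is done inside raw_read */
	return bfa_flash_raw_read(pci_bar, 0, (char *)hdr, sizeof(*hdr));
}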
#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has a better f/w than smem do not work with smem.
	 * If smem f/w is the same as flash f/w, work with it (at this point
	 * it is neither old nor incompatible).
	 * If flash is old or incompatible, work with smem iff smem f/w ==
	 * drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			true : false;
/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);

/* Conditionally flush any pending message from firmware at start. */
bfa_ioc_msgflush(struct bfa_ioc *ioc)
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/* check if firmware is valid */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
		    BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/* If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/* If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/* When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/* Initialize the h/w for any other states. */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
	    BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
bfa_nw_ioc_timeout(void *ioc_arg)
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
bfa_ioc_send_enable(struct bfa_ioc *ioc)
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));

bfa_ioc_send_disable(struct bfa_ioc *ioc)
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));

bfa_ioc_send_getattr(struct bfa_ioc *ioc)
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
bfa_nw_ioc_hb_check(void *cbarg)
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));

bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));

bfa_ioc_hb_stop(struct bfa_ioc *ioc)
	del_timer(&ioc->hb_timer);
/* Initiate a full firmware download. */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/* write smem */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
		       ((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/* handle page offset wrap around */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
			       ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		boot_type = BFI_FWBOOT_TYPE_NORMAL;
	}
	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
				ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ((ioc->ioc_regs.smem_page_start)
			+ BFI_FWBOOT_DEVMODE_OFF));
	writel(boot_type, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_TYPE_OFF)));
	writel(boot_env, ((ioc->ioc_regs.smem_page_start)
			+ (BFI_FWBOOT_ENV_OFF)));
	return BFA_STATUS_OK;
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
	bfa_ioc_hwinit(ioc, force);

/* BFA ioc enable reply by firmware */
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
		     u8 cap_bm)
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);

/* Update BFA configuration from firmware configuration. */
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
/* Attach time initialization of mbox logic. */
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
/* Mbox poll timer -- restarts any pending mailbox requests. */
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/* If no command pending, do nothing */
	if (list_empty(&mod->cmd_q))
		return;

	/* If previous command is not yet fetched by firmware, do nothing */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/* Enqueue command to firmware. */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/* Give a callback to the client, indicating that the command is sent */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
/* Cleanup any pending requests. */
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc: memory for IOC
 * @tbuf: app memory to store data from smem
 * @soff: smem offset
 * @sz: size of smem in bytes
 */
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/* handle page offset wrap around */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/* release semaphore */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return 0;
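/*
 * Illustrative sketch (hypothetical caller, not part of the driver):
 * bfa_nw_ioc_smem_read() takes a word-multiple size and returns 0 on
 * success; pulling a single 32-bit word from the start of SMEM:
 */
static int bfa_ioc_smem_read_word_example(struct bfa_ioc *ioc, u32 *word)
{
	return bfa_nw_ioc_smem_read(ioc, word, 0, sizeof(*word));
}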
/* Retrieve saved firmware trace from a prior IOC failure. */
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
	int tlen, status = 0;

	tlen = *trclen;
	if (tlen > BNA_DBG_FWTRC_LEN)
		tlen = BNA_DBG_FWTRC_LEN;

	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
/* Save firmware trace if configured. */
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = 0;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
/* Retrieve saved firmware trace from a prior IOC failure. */
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
/* IOCPF to IOC interface */
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);

bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);

bfa_ioc_pf_failed(struct bfa_ioc *ioc)
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);

bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);

bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
	/*
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
	     u32 boot_env)
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

		/* Work with flash iff flash f/w is better than driver f/w.
		 * Otherwise push the driver's firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
		    BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
	} else {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
	if (status == BFA_STATUS_OK)
		bfa_ioc_lpu_start(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);

	return status;
/* Enable/disable IOC failure auto recovery. */
bfa_nw_ioc_auto_recover(bool auto_recover)
	bfa_nw_auto_recover = auto_recover;
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/* read the MBOX msg */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox + i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;
	}
/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc: memory for IOC
 * @bfa: driver instance structure
 * @cbfn: callback functions
 */
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
/* Driver detach time IOC cleanup. */
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
/**
 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
 *
 * @pcidev: PCI device information for this IOC
 * @clscode: class code
 */
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		    enum bfi_pcifn_class clscode)
	ioc->clscode = clscode;
	ioc->pcidev = *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode = BFI_ASIC_MODE_FC16;
			ioc->fcmode = true;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;
	}

	/*
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_nw_ioc_set_ct2_hwif(ioc);
		bfa_nw_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
/**
 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
 *
 * @dm_kva: kernel virtual address of IOC dma memory
 * @dm_pa: physical address of IOC dma memory
 */
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
/* Return size of dma memory required. */
bfa_nw_ioc_meminfo(void)
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
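/*
 * Illustrative sketch (hypothetical probe-path helper, assumes
 * <linux/dma-mapping.h>): the driver allocates bfa_nw_ioc_meminfo()
 * bytes of coherent DMA memory and hands it to the IOC:
 */
static int bfa_ioc_claim_dma_example(struct device *dev, struct bfa_ioc *ioc)
{
	dma_addr_t pa;
	u32 sz = bfa_nw_ioc_meminfo();
	u8 *kva = dma_alloc_coherent(dev, sz, &pa, GFP_KERNEL);

	if (!kva)
		return -ENOMEM;

	bfa_nw_ioc_mem_claim(ioc, kva, pa);
	return 0;
}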
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);

bfa_nw_ioc_disable(struct bfa_ioc *ioc)
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
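/*
 * Illustrative sketch (hypothetical attach path, not part of the driver):
 * the expected bring-up order for an IOC instance is attach -> pci_init
 * -> mem_claim -> enable; completion is reported via enable_cbfn:
 */
static void bfa_ioc_bringup_example(struct bfa_ioc *ioc, void *bfa,
				    struct bfa_ioc_cbfn *cbfn,
				    struct bfa_pcidev *pcidev,
				    u8 *dm_kva, u64 dm_pa)
{
	bfa_nw_ioc_attach(ioc, bfa, cbfn);
	bfa_nw_ioc_pci_init(ioc, pcidev, BFI_PCIFN_CLASS_ETH);
	bfa_nw_ioc_mem_claim(ioc, dm_kva, dm_pa);
	bfa_nw_ioc_enable(ioc);
}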
/* Initialize memory for saving firmware trace. */
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;

bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
/* Register mailbox message handler function, to be called by common modules */
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		       bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
2660 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2662 * @ioc: IOC instance
2663 * @cmd: Mailbox command
2665 * Waits if mailbox is busy. Responsibility of caller to serialize
2668 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2669 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2671 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
u32 stat;
cmd->cbfn = cbfn;
cmd->cbarg = cbarg;
2678 * If a previous command is pending, queue the new command behind it
2680 if (!list_empty(&mod->cmd_q)) {
2681 list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
2686 * If the mailbox is busy, queue the command; the poll timer will send it
2688 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
if (stat) {
2690 list_add_tail(&cmd->qe, &mod->cmd_q);
return;
}
2695 * mailbox is free -- write the command to firmware now
2697 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
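/*
 * Illustrative sketch (hypothetical client, mirroring the flash module
 * below): fill the BFI message embedded in a persistent bfa_mbox_cmd and
 * queue it. The cmd must stay allocated until it reaches firmware, since
 * a busy mailbox only enqueues it.
 */
static void bnad_hyp_queue_ioc_enable(struct bfa_ioc *ioc,
				      struct bfa_mbox_cmd *cmd)
{
	struct bfi_ioc_ctrl_req *req = (struct bfi_ioc_ctrl_req *)cmd->msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_nw_ioc_mbox_queue(ioc, cmd, NULL, NULL);	/* no completion cb */
}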
2702 /* Handle mailbox interrupts */
2704 bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2706 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfi_mbmsg m;
int mc;
2710 if (bfa_ioc_msgget(ioc, &m)) {
2712 * Treat IOC message class as special.
2714 mc = m.mh.msg_class;
2715 if (mc == BFI_MC_IOC) {
2716 bfa_ioc_isr(ioc, &m);
return;
}
2720 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
return;
2723 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
2726 bfa_ioc_lpu_read_stat(ioc);
2729 * Try to send pending mailbox commands
2731 bfa_ioc_mbox_poll(ioc);
2735 bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2737 bfa_ioc_stats(ioc, ioc_hbfails);
2738 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2739 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2742 /* return true if IOC is disabled */
2744 bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2746 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2747 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2750 /* return true if IOC is operational */
2752 bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2754 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2757 /* Add a notification block to the IOC heartbeat failure notification
2758 * queue. To be used by common modules such as cee, port and diag.
2761 bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2762 struct bfa_ioc_notify *notify)
2764 list_add_tail(&notify->qe, &ioc->notify_q);
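/*
 * Illustrative sketch (hypothetical client): initialize a bfa_ioc_notify
 * block and register it, the same pattern bfa_nw_flash_attach() uses
 * further below. The cee names are assumptions for the example.
 */
static void bnad_hyp_cee_notify(void *cbarg, enum bfa_ioc_event event)
{
	/* react to BFA_IOC_E_ENABLED / E_DISABLED / E_FAILED here */
}

static void bnad_hyp_cee_register(struct bfa_cee *cee, struct bfa_ioc *ioc)
{
	bfa_ioc_notify_init(&cee->ioc_notify, bnad_hyp_cee_notify, cee);
	bfa_nw_ioc_notify_register(ioc, &cee->ioc_notify);
}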
2767 #define BFA_MFG_NAME "QLogic"
2769 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2770 struct bfa_adapter_attr *ad_attr)
2772 struct bfi_ioc_attr *ioc_attr;
2774 ioc_attr = ioc->attr;
2776 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2777 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2778 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2779 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2780 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2781 sizeof(struct bfa_mfg_vpd));
2783 ad_attr->nports = bfa_ioc_get_nports(ioc);
2784 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2786 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2787 /* For now, the model description uses the same string as the model */
2788 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2790 ad_attr->card_type = ioc_attr->card_type;
2791 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2793 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2794 ad_attr->prototype = 1;
2796 ad_attr->prototype = 0;
2798 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2799 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
2801 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2802 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2803 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2804 ad_attr->asic_rev = ioc_attr->asic_rev;
2806 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2809 static enum bfa_ioc_type
2810 bfa_ioc_get_type(struct bfa_ioc *ioc)
2812 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2813 return BFA_IOC_TYPE_LL;
2815 BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));
2817 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2818 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2822 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
memcpy(serial_num,
2825 (void *)ioc->attr->brcd_serialnum,
2826 BFA_ADAPTER_SERIAL_NUM_LEN);
2830 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2832 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2836 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2838 BUG_ON(!(chip_rev));
2840 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
chip_rev[0] = 'R';
chip_rev[1] = 'e';
chip_rev[2] = 'v';
chip_rev[3] = '-';
2846 chip_rev[4] = ioc->attr->asic_rev;
chip_rev[5] = '\0';
2851 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2853 memcpy(optrom_ver, ioc->attr->optrom_version,
BFA_VERSION_LEN);
2858 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2860 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2864 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2866 struct bfi_ioc_attr *ioc_attr;
2869 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2871 ioc_attr = ioc->attr;
2873 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2874 BFA_MFG_NAME, ioc_attr->card_type);
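/*
 * Worked example (card_type value hypothetical): with BFA_MFG_NAME
 * "QLogic" and card_type 1860, the model string becomes "QLogic-1860".
 */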
2877 static enum bfa_ioc_state
2878 bfa_ioc_get_state(struct bfa_ioc *ioc)
2880 enum bfa_iocpf_state iocpf_st;
2881 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2883 if (ioc_st == BFA_IOC_ENABLING ||
2884 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2886 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2889 case BFA_IOCPF_SEMWAIT:
2890 ioc_st = BFA_IOC_SEMWAIT;
break;
2893 case BFA_IOCPF_HWINIT:
2894 ioc_st = BFA_IOC_HWINIT;
break;
2897 case BFA_IOCPF_FWMISMATCH:
2898 ioc_st = BFA_IOC_FWMISMATCH;
break;
2901 case BFA_IOCPF_FAIL:
2902 ioc_st = BFA_IOC_FAIL;
break;
2905 case BFA_IOCPF_INITFAIL:
2906 ioc_st = BFA_IOC_INITFAIL;
break;
default:
break;
}
}
return ioc_st;
2917 bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2919 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2921 ioc_attr->state = bfa_ioc_get_state(ioc);
2922 ioc_attr->port_id = bfa_ioc_portid(ioc);
2923 ioc_attr->port_mode = ioc->port_mode;
2925 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2926 ioc_attr->cap_bm = ioc->ad_cap_bm;
2928 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2930 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2932 ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2933 ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2934 ioc_attr->def_fn = bfa_ioc_is_default(ioc);
2935 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2940 bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2942 return ioc->attr->pwwn;
2946 bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
2948 return ioc->attr->mac;
2951 /* Firmware failure detected. Start recovery actions. */
2953 bfa_ioc_recover(struct bfa_ioc *ioc)
2955 pr_crit("Heart Beat of IOC has failed\n");
2956 bfa_ioc_stats(ioc, ioc_hbfails);
2957 bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2958 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2961 /* BFA IOC PF private functions */
2964 bfa_iocpf_enable(struct bfa_ioc *ioc)
2966 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2970 bfa_iocpf_disable(struct bfa_ioc *ioc)
2972 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2976 bfa_iocpf_fail(struct bfa_ioc *ioc)
2978 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2982 bfa_iocpf_initfail(struct bfa_ioc *ioc)
2984 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2988 bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2990 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2994 bfa_iocpf_stop(struct bfa_ioc *ioc)
2996 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
3000 bfa_nw_iocpf_timeout(void *ioc_arg)
3002 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
3003 enum bfa_iocpf_state iocpf_st;
3005 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
3007 if (iocpf_st == BFA_IOCPF_HWINIT)
3008 bfa_ioc_poll_fwinit(ioc);
else
3010 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3014 bfa_nw_iocpf_sem_timeout(void *ioc_arg)
3016 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
3018 bfa_ioc_hw_sem_get(ioc);
3022 bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
3024 u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3026 if (fwstate == BFI_IOC_DISABLED) {
3027 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
return;
}
3031 if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
3032 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
} else {
3034 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3035 mod_timer(&ioc->iocpf_timer, jiffies +
3036 msecs_to_jiffies(BFA_IOC_POLL_TOV));
}
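/*
 * Timing sketch (constant values assumed from bfa_ioc.h): with BFA_IOC_TOV
 * at 3000 ms and BFA_IOC_POLL_TOV at 200 ms, fwinit is polled roughly 15
 * times before IOCPF_E_TIMEOUT is raised.
 */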
3041 * Flash module specific
3045 * The FLASH DMA buffer should be big enough to hold both the MFG block
3046 * and the ASIC block (64k) at the same time, and should be 2k aligned
3047 * so that a write segment never crosses a sector boundary.
3049 #define BFA_FLASH_SEG_SZ 2048
3050 #define BFA_FLASH_DMA_BUF_SZ \
3051 roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
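/*
 * Worked example (MFG block size hypothetical): if sizeof(struct
 * bfa_mfg_block) were 256 bytes, BFA_FLASH_DMA_BUF_SZ would come to
 * roundup(0x10000 + 256, 2048) = 0x10800 bytes. A compile-time check
 * like the sketch below could assert the 2k alignment the comment above
 * relies on.
 */
static inline void bfa_hyp_flash_buf_size_check(void)
{
	BUILD_BUG_ON(BFA_FLASH_DMA_BUF_SZ % BFA_FLASH_SEG_SZ);
}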
3054 bfa_flash_cb(struct bfa_flash *flash)
flash->op_busy = 0;
if (flash->cbfn)
3058 flash->cbfn(flash->cbarg, flash->status);
3062 bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
3064 struct bfa_flash *flash = cbarg;
3067 case BFA_IOC_E_DISABLED:
3068 case BFA_IOC_E_FAILED:
3069 if (flash->op_busy) {
3070 flash->status = BFA_STATUS_IOC_FAILURE;
3071 flash->cbfn(flash->cbarg, flash->status);
flash->op_busy = 0;
}
break;
default:
break;
}
3081 * Send flash write request.
3084 bfa_flash_write_send(struct bfa_flash *flash)
3086 struct bfi_flash_write_req *msg =
3087 (struct bfi_flash_write_req *) flash->mb.msg;
u32 len;
3090 msg->type = be32_to_cpu(flash->type);
3091 msg->instance = flash->instance;
3092 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3093 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3094 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3095 msg->length = be32_to_cpu(len);
3097 /* indicate whether this is the last message of the whole write operation */
3098 msg->last = (len == flash->residue) ? 1 : 0;
3100 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3101 bfa_ioc_portid(flash->ioc));
3102 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3103 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3104 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3106 flash->residue -= len;
3107 flash->offset += len;
3111 * bfa_flash_read_send - Send flash read request.
3113 * @cbarg: callback argument
3116 bfa_flash_read_send(void *cbarg)
3118 struct bfa_flash *flash = cbarg;
3119 struct bfi_flash_read_req *msg =
3120 (struct bfi_flash_read_req *) flash->mb.msg;
u32 len;
3123 msg->type = be32_to_cpu(flash->type);
3124 msg->instance = flash->instance;
3125 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
3126 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3127 flash->residue : BFA_FLASH_DMA_BUF_SZ;
3128 msg->length = be32_to_cpu(len);
3129 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3130 bfa_ioc_portid(flash->ioc));
3131 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3132 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3136 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
3138 * @flasharg: flash structure
3139 * @msg: message structure
3142 bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
3144 struct bfa_flash *flash = flasharg;
u32 status;
union {
3148 struct bfi_flash_query_rsp *query;
3149 struct bfi_flash_write_rsp *write;
3150 struct bfi_flash_read_rsp *read;
3151 struct bfi_mbmsg *msg;
} m;
m.msg = msg;
3156 /* ignore a stale response received after an IOC failure */
3157 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
return;
3160 switch (msg->mh.msg_id) {
3161 case BFI_FLASH_I2H_QUERY_RSP:
3162 status = be32_to_cpu(m.query->status);
3163 if (status == BFA_STATUS_OK) {
u32 i;
3165 struct bfa_flash_attr *attr, *f;
3167 attr = (struct bfa_flash_attr *) flash->ubuf;
3168 f = (struct bfa_flash_attr *) flash->dbuf_kva;
3169 attr->status = be32_to_cpu(f->status);
3170 attr->npart = be32_to_cpu(f->npart);
3171 for (i = 0; i < attr->npart; i++) {
3172 attr->part[i].part_type =
3173 be32_to_cpu(f->part[i].part_type);
3174 attr->part[i].part_instance =
3175 be32_to_cpu(f->part[i].part_instance);
3176 attr->part[i].part_off =
3177 be32_to_cpu(f->part[i].part_off);
3178 attr->part[i].part_size =
3179 be32_to_cpu(f->part[i].part_size);
3180 attr->part[i].part_len =
3181 be32_to_cpu(f->part[i].part_len);
3182 attr->part[i].part_status =
3183 be32_to_cpu(f->part[i].part_status);
}
}
3186 flash->status = status;
3187 bfa_flash_cb(flash);
break;
3189 case BFI_FLASH_I2H_WRITE_RSP:
3190 status = be32_to_cpu(m.write->status);
3191 if (status != BFA_STATUS_OK || flash->residue == 0) {
3192 flash->status = status;
3193 bfa_flash_cb(flash);
} else
3195 bfa_flash_write_send(flash);
break;
3197 case BFI_FLASH_I2H_READ_RSP:
3198 status = be32_to_cpu(m.read->status);
3199 if (status != BFA_STATUS_OK) {
3200 flash->status = status;
3201 bfa_flash_cb(flash);
} else {
3203 u32 len = be32_to_cpu(m.read->length);
3204 memcpy(flash->ubuf + flash->offset,
3205 flash->dbuf_kva, len);
3206 flash->residue -= len;
3207 flash->offset += len;
3208 if (flash->residue == 0) {
3209 flash->status = status;
3210 bfa_flash_cb(flash);
} else
3212 bfa_flash_read_send(flash);
}
break;
3215 case BFI_FLASH_I2H_BOOT_VER_RSP:
3216 case BFI_FLASH_I2H_EVENT:
break;
3224 * Flash memory info API: return the size of DMA memory required by the flash module.
3227 bfa_nw_flash_meminfo(void)
3229 return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3233 * bfa_nw_flash_attach - Flash attach API.
3235 * @flash: flash structure
3236 * @ioc: ioc structure
3237 * @dev: device structure
3240 bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
flash->ioc = ioc;
flash->cbfn = NULL;
3244 flash->cbarg = NULL;
flash->op_busy = 0;
3247 bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
3248 bfa_q_qe_init(&flash->ioc_notify);
3249 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
3250 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
3254 * bfa_nw_flash_memclaim - Claim memory for flash
3256 * @flash: flash structure
3257 * @dm_kva: kernel virtual address of the flash DMA memory
3258 * @dm_pa: physical address of the flash DMA memory
3261 bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
3263 flash->dbuf_kva = dm_kva;
3264 flash->dbuf_pa = dm_pa;
3265 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
3266 dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3267 dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
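/*
 * Illustrative sketch (hypothetical helper): pairing bfa_nw_flash_meminfo()
 * with attach and memclaim, analogous to the IOC memory setup earlier.
 */
static int bnad_hyp_setup_flash(struct pci_dev *pdev, struct bfa_flash *flash,
				struct bfa_ioc *ioc, void *dev)
{
	dma_addr_t pa;
	u32 len = bfa_nw_flash_meminfo();
	void *kva = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);

	if (!kva)
		return -ENOMEM;
	bfa_nw_flash_attach(flash, ioc, dev);
	bfa_nw_flash_memclaim(flash, (u8 *)kva, pa);
	return 0;
}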
3271 * bfa_nw_flash_get_attr - Get flash attribute.
3273 * @flash: flash structure
3274 * @attr: flash attribute structure
3275 * @cbfn: callback function
3276 * @cbarg: callback argument
3281 bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
3282 bfa_cb_flash cbfn, void *cbarg)
3284 struct bfi_flash_query_req *msg =
3285 (struct bfi_flash_query_req *) flash->mb.msg;
3287 if (!bfa_nw_ioc_is_operational(flash->ioc))
3288 return BFA_STATUS_IOC_NON_OP;
if (flash->op_busy)
3291 return BFA_STATUS_DEVBUSY;
flash->op_busy = 1;
flash->cbfn = cbfn;
3295 flash->cbarg = cbarg;
3296 flash->ubuf = (u8 *) attr;
3298 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3299 bfa_ioc_portid(flash->ioc));
3300 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
3301 bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3303 return BFA_STATUS_OK;
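/*
 * Illustrative usage (hypothetical caller): query the flash attributes.
 * attr must stay valid until the callback runs, because the response is
 * DMA'd and byte-swapped into it by bfa_flash_intr() above.
 */
static void bnad_hyp_flash_cb(void *cbarg, enum bfa_status status)
{
	if (status != BFA_STATUS_OK)
		pr_err("flash request failed: %d\n", status);
}

static enum bfa_status bnad_hyp_query_flash(struct bfa_flash *flash,
					    struct bfa_flash_attr *attr)
{
	return bfa_nw_flash_get_attr(flash, attr, bnad_hyp_flash_cb, NULL);
}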
3307 * bfa_nw_flash_update_part - Update flash partition.
3309 * @flash: flash structure
3310 * @type: flash partition type
3311 * @instance: flash partition instance
3312 * @buf: update data buffer
3313 * @len: data buffer length
3314 * @offset: offset relative to the partition starting address
3315 * @cbfn: callback function
3316 * @cbarg: callback argument
3321 bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
3322 void *buf, u32 len, u32 offset,
3323 bfa_cb_flash cbfn, void *cbarg)
3325 if (!bfa_nw_ioc_is_operational(flash->ioc))
3326 return BFA_STATUS_IOC_NON_OP;
3329 * 'len' must be non-zero and a multiple of 4 bytes (word aligned)
3331 if (!len || (len & 0x03))
3332 return BFA_STATUS_FLASH_BAD_LEN;
3334 if (type == BFA_FLASH_PART_MFG)
3335 return BFA_STATUS_EINVAL;
if (flash->op_busy)
3338 return BFA_STATUS_DEVBUSY;
flash->op_busy = 1;
flash->cbfn = cbfn;
3342 flash->cbarg = cbarg;
flash->type = type;
3344 flash->instance = instance;
3345 flash->residue = len;
flash->offset = 0;
3347 flash->addr_off = offset;
flash->ubuf = buf;
3350 bfa_flash_write_send(flash);
3352 return BFA_STATUS_OK;
3356 * bfa_nw_flash_read_part - Read flash partition.
3358 * @flash: flash structure
3359 * @type: flash partition type
3360 * @instance: flash partition instance
3361 * @buf: read data buffer
3362 * @len: data buffer length
3363 * @offset: offset relative to the partition starting address
3364 * @cbfn: callback function
3365 * @cbarg: callback argument
3370 bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
3371 void *buf, u32 len, u32 offset,
3372 bfa_cb_flash cbfn, void *cbarg)
3374 if (!bfa_nw_ioc_is_operational(flash->ioc))
3375 return BFA_STATUS_IOC_NON_OP;
3378 * 'len' must be non-zero and a multiple of 4 bytes (word aligned)
3380 if (!len || (len & 0x03))
3381 return BFA_STATUS_FLASH_BAD_LEN;
if (flash->op_busy)
3384 return BFA_STATUS_DEVBUSY;
flash->op_busy = 1;
flash->cbfn = cbfn;
3388 flash->cbarg = cbarg;
flash->type = type;
3390 flash->instance = instance;
3391 flash->residue = len;
flash->offset = 0;
3393 flash->addr_off = offset;
flash->ubuf = buf;
3396 bfa_flash_read_send(flash);
3398 return BFA_STATUS_OK;
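/*
 * Illustrative usage (hypothetical, reusing bnad_hyp_flash_cb from the
 * query sketch): read 1024 bytes from the start of the firmware image
 * partition. len is non-zero and a multiple of 4, as required above, and
 * buf must stay valid until the callback fires.
 */
static enum bfa_status bnad_hyp_read_fw(struct bfa_flash *flash, void *buf)
{
	return bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
				      buf, 1024, 0,
				      bnad_hyp_flash_cb, NULL);
}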