kernel/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

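/*
 * The ioc_fail_sync scratch register holds two per-PCI-function
 * bitmasks: the lower halfword accumulates "sync acked" bits and the
 * upper halfword (above BFA_IOC_SYNC_REQD_SH) "sync required" bits.
 */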
#define bfa_ioc_ct_sync_pos(__ioc)      \
                ((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH            16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
                        struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
                        struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

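/*
 * Hardware interface vtables: one for the original Catapult (CT)
 * ASIC and one for CT2. CT2 supplies its own PLL init, register
 * init and port mapping, adds an lpu_read_stat hook, and leaves
 * isr_mode_set unimplemented.
 */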
static const struct bfa_ioc_hwif nw_hwif_ct = {
        .ioc_pll_init        = bfa_ioc_ct_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct_reg_init,
        .ioc_map_port        = bfa_ioc_ct_map_port,
        .ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
        .ioc_pll_init        = bfa_ioc_ct2_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct2_reg_init,
        .ioc_map_port        = bfa_ioc_ct2_map_port,
        .ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
        .ioc_isr_mode_set    = NULL,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
        .ioc_set_fwstate     = bfa_ioc_ct_set_cur_ioc_fwstate,
        .ioc_get_fwstate     = bfa_ioc_ct_get_cur_ioc_fwstate,
        .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
        .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct2;
}

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /* If bios boot (flash based) -- do not increment usage count */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /* If usage count is 0, always return TRUE. */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /* A non-zero use count with the chip still uninitialized is invalid. */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /* Check if another driver with a different firmware is active */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /* Same firmware version. Increment the reference count. */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /* If bios boot (flash based) -- do not decrement usage count */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /* decrement usage count */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
        /* Wait for halt to take effect */
        readl(ioc->ioc_regs.ll_halt);
        readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
} ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
        u32     hfn;
        u32     lpu;
        u32     lpu_read;
} ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT},
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT},
};
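
/*
 * Note: the CT tables above are indexed by PCI function (four
 * entries), while the CT2 table is indexed by port (two entries).
 */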

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int     pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int     port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

        /*
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

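/*
 * FNC_PERS_REG packs an 8-bit personality field per PCI function;
 * the port id is extracted from this function's byte.
 */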
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /* For catapult, base port id on personality register and IOC type */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /* If already in desired mode, do not change anything */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

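/*
 * Check for an LPU read-status event. The register appears to be
 * write-one-to-clear: a nonzero value is acknowledged by writing 1.
 */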
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return true;
        }

        return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
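/*
 * If BIOS/firmware has already programmed the number-of-vectors field
 * (__MSIX_VT_NUMVT__MK), only the mailbox-error vector offset is
 * rewritten below; otherwise HOSTFN_MSIX_DEFAULT vectors are claimed
 * per PCI function, starting at pcifn * HOSTFN_MSIX_DEFAULT.
 */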
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                writel(r32 & __MSIX_VT_OFST_,
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
                        HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(0, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}

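/*
 * Synchronized IOC failure processing routines. Each PCI function
 * sets its "sync required" bit in ioc_fail_sync when it joins
 * (bfa_ioc_ct_sync_join) and its "sync acked" bit when it
 * acknowledges a failure (bfa_ioc_ct_sync_ack); reinitialization
 * may proceed once the two bitmasks match (bfa_ioc_ct_sync_complete).
 */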
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time.  If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /*
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                                ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /*
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
                               enum bfi_ioc_state fwstate)
{
        writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
        return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}

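/*
 * Set up the application PLLs (SCLK/LCLK) on the CT ASIC: program
 * the PLL control words for the requested ASIC mode (FC vs. FCoE),
 * pulse the logic soft resets, release LMEM from reset and run the
 * EDRAM memory BIST.
 */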
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        u32     pll_sclk, pll_fclk, r32;
        bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk |
                __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * while doing PLL init don't clock gate ethernet subsystem
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel((r32 | __ETH_CLK_ENABLE_PORT0),
                                (rb + CT2_CHIP_MISC_PRG));

        r32 = readl((rb + CT2_PCIE_MISC_REG));
        writel((r32 | __ETH_CLK_ENABLE_PORT1),
                                (rb + CT2_PCIE_MISC_REG));

        /*
         * set sclk value
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);

        /*
         * Don't do clock gating for ethernet subsystem, firmware/NFC will
         * do this appropriately
         */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);

        writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
        udelay(1000);
        writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        volatile u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * release soft reset on l_clk
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /* put port0, port1 MAC & AHB in reset */
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(0)));
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000

static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
        volatile u32 r32;

        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
        if (r32 & __NFC_CONTROLLER_HALTED)
                return true;

        return false;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
        volatile u32 r32;
        int i;

        writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                if (!(r32 & __NFC_CONTROLLER_HALTED))
                        return;
                udelay(1000);
        }
        BUG_ON(1);
}

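/*
 * CT2 PLL init takes one of two paths: if the NFC firmware is recent
 * enough (>= CT2_NFC_VER_VALID) and the WGN status shows it loaded,
 * the NFC is resumed and asked to reset/restart the PLLs itself;
 * otherwise the NFC is halted and the MAC, SCLK and LCLK blocks are
 * initialized directly by the host.
 */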
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        volatile u32 wgn, r32;
        u32 nfc_ver, i;

        wgn = readl(rb + CT2_WGN_STATUS);

        nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

        if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
                (nfc_ver >= CT2_NFC_VER_VALID)) {
                if (bfa_ioc_ct2_nfc_halted(rb))
                        bfa_ioc_ct2_nfc_resume(rb);
                writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                                rb + CT2_CSI_FW_CTL_SET_REG);

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                                break;
                }
                BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                                break;
                }
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
                udelay(1000);

                r32 = readl(rb + CT2_CSI_FW_CTL_REG);
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        } else {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }

                bfa_ioc_ct2_mac_reset(rb);
                bfa_ioc_ct2_sclk_init(rb);
                bfa_ioc_ct2_lclk_init(rb);

                /* release soft reset on s_clk & l_clk */
                r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
                writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_SCLK_CTL_REG);
                r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
                writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_LCLK_CTL_REG);
        }

        /* Announce flash device presence, if flash was corrupted. */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl((rb + PSS_GPIO_OUT_REG));
                writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
                r32 = readl((rb + PSS_GPIO_OE_REG));
                writel(r32 | 1, rb + PSS_GPIO_OE_REG);
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
        writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
        writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

        /* For first time initialization, no need to clear interrupts */
        r32 = readl(rb + HOST_SEM5_REG);
        if (r32 & 0x1) {
                r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
                if (r32 == 1) {
                        writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
                        readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
                }
                r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
                if (r32 == 1) {
                        writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
                        readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
                }
        }

        bfa_ioc_ct2_mem_init(rb);

        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
        return BFA_STATUS_OK;
}