/*
 * vxge_config.h: iPXE driver for Neterion Inc's X3100 Series 10GbE
 *              PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference.  Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#ifndef VXGE_CONFIG_H
#define VXGE_CONFIG_H

#include <stdint.h>
#include <ipxe/list.h>
#include <ipxe/pci.h>

#ifndef VXGE_CACHE_LINE_SIZE
#define VXGE_CACHE_LINE_SIZE 4096
#endif

#define WAIT_FACTOR          1

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a)  (sizeof(a) / sizeof((a)[0]))
#endif

#define VXGE_HW_MAC_MAX_WIRE_PORTS      2
#define VXGE_HW_MAC_MAX_AGGR_PORTS      2
#define VXGE_HW_MAC_MAX_PORTS           3

#define VXGE_HW_MIN_MTU                         68
#define VXGE_HW_MAX_MTU                         9600
#define VXGE_HW_DEFAULT_MTU                     1500

#ifndef __iomem
#define __iomem
#endif

#ifndef ____cacheline_aligned
#define ____cacheline_aligned
#endif

/**
 * Debug filtering masks
 */
#define VXGE_NONE       0x00
#define VXGE_INFO       0x01
#define VXGE_INTR       0x02
#define VXGE_XMIT       0x04
#define VXGE_POLL       0x08
#define VXGE_ERR        0x10
#define VXGE_TRACE      0x20
#define VXGE_ALL        (VXGE_INFO|VXGE_INTR|VXGE_XMIT\
                        |VXGE_POLL|VXGE_ERR|VXGE_TRACE)

#define NULL_VPID                                       0xFFFFFFFF

#define VXGE_HW_EVENT_BASE                      0
#define VXGE_LL_EVENT_BASE                      100

#define VXGE_HW_BASE_INF        100
#define VXGE_HW_BASE_ERR        200
#define VXGE_HW_BASE_BADCFG     300
#define VXGE_HW_DEF_DEVICE_POLL_MILLIS            1000
#define VXGE_HW_MAX_PAYLOAD_SIZE_512            2

enum vxge_hw_status {
        VXGE_HW_OK                                = 0,
        VXGE_HW_FAIL                              = 1,
        VXGE_HW_PENDING                           = 2,
        VXGE_HW_COMPLETIONS_REMAIN                = 3,

        VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS = VXGE_HW_BASE_INF + 1,
        VXGE_HW_INF_OUT_OF_DESCRIPTORS            = VXGE_HW_BASE_INF + 2,
        VXGE_HW_INF_SW_LRO_BEGIN                  = VXGE_HW_BASE_INF + 3,
        VXGE_HW_INF_SW_LRO_CONT                   = VXGE_HW_BASE_INF + 4,
        VXGE_HW_INF_SW_LRO_UNCAPABLE              = VXGE_HW_BASE_INF + 5,
        VXGE_HW_INF_SW_LRO_FLUSH_SESSION          = VXGE_HW_BASE_INF + 6,
        VXGE_HW_INF_SW_LRO_FLUSH_BOTH             = VXGE_HW_BASE_INF + 7,

        VXGE_HW_ERR_INVALID_HANDLE                = VXGE_HW_BASE_ERR + 1,
        VXGE_HW_ERR_OUT_OF_MEMORY                 = VXGE_HW_BASE_ERR + 2,
        VXGE_HW_ERR_VPATH_NOT_AVAILABLE           = VXGE_HW_BASE_ERR + 3,
        VXGE_HW_ERR_VPATH_NOT_OPEN                = VXGE_HW_BASE_ERR + 4,
        VXGE_HW_ERR_WRONG_IRQ                     = VXGE_HW_BASE_ERR + 5,
        VXGE_HW_ERR_SWAPPER_CTRL                  = VXGE_HW_BASE_ERR + 6,
        VXGE_HW_ERR_INVALID_MTU_SIZE              = VXGE_HW_BASE_ERR + 7,
        VXGE_HW_ERR_INVALID_INDEX                 = VXGE_HW_BASE_ERR + 8,
        VXGE_HW_ERR_INVALID_TYPE                  = VXGE_HW_BASE_ERR + 9,
        VXGE_HW_ERR_INVALID_OFFSET                = VXGE_HW_BASE_ERR + 10,
        VXGE_HW_ERR_INVALID_DEVICE                = VXGE_HW_BASE_ERR + 11,
        VXGE_HW_ERR_VERSION_CONFLICT              = VXGE_HW_BASE_ERR + 12,
        VXGE_HW_ERR_INVALID_PCI_INFO              = VXGE_HW_BASE_ERR + 13,
        VXGE_HW_ERR_INVALID_TCODE                 = VXGE_HW_BASE_ERR + 14,
        VXGE_HW_ERR_INVALID_BLOCK_SIZE            = VXGE_HW_BASE_ERR + 15,
        VXGE_HW_ERR_INVALID_STATE                 = VXGE_HW_BASE_ERR + 16,
        VXGE_HW_ERR_PRIVILAGED_OPEARATION         = VXGE_HW_BASE_ERR + 17,
        VXGE_HW_ERR_INVALID_PORT                  = VXGE_HW_BASE_ERR + 18,
        VXGE_HW_ERR_FIFO                          = VXGE_HW_BASE_ERR + 19,
        VXGE_HW_ERR_VPATH                         = VXGE_HW_BASE_ERR + 20,
        VXGE_HW_ERR_CRITICAL                      = VXGE_HW_BASE_ERR + 21,
        VXGE_HW_ERR_SLOT_FREEZE                   = VXGE_HW_BASE_ERR + 22,
        VXGE_HW_ERR_INVALID_MIN_BANDWIDTH         = VXGE_HW_BASE_ERR + 25,
        VXGE_HW_ERR_INVALID_MAX_BANDWIDTH         = VXGE_HW_BASE_ERR + 26,
        VXGE_HW_ERR_INVALID_TOTAL_BANDWIDTH       = VXGE_HW_BASE_ERR + 27,
        VXGE_HW_ERR_INVALID_BANDWIDTH_LIMIT       = VXGE_HW_BASE_ERR + 28,
        VXGE_HW_ERR_RESET_IN_PROGRESS             = VXGE_HW_BASE_ERR + 29,
        VXGE_HW_ERR_OUT_OF_SPACE                  = VXGE_HW_BASE_ERR + 30,
        VXGE_HW_ERR_INVALID_FUNC_MODE             = VXGE_HW_BASE_ERR + 31,
        VXGE_HW_ERR_INVALID_DP_MODE               = VXGE_HW_BASE_ERR + 32,
        VXGE_HW_ERR_INVALID_FAILURE_BEHAVIOUR     = VXGE_HW_BASE_ERR + 33,
        VXGE_HW_ERR_INVALID_L2_SWITCH_STATE       = VXGE_HW_BASE_ERR + 34,
        VXGE_HW_ERR_INVALID_CATCH_BASIN_MODE      = VXGE_HW_BASE_ERR + 35,

        VXGE_HW_BADCFG_RING_INDICATE_MAX_PKTS     = VXGE_HW_BASE_BADCFG + 1,
        VXGE_HW_BADCFG_FIFO_BLOCKS                = VXGE_HW_BASE_BADCFG + 2,
        VXGE_HW_BADCFG_VPATH_MTU                  = VXGE_HW_BASE_BADCFG + 3,
        VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG   = VXGE_HW_BASE_BADCFG + 4,
        VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH        = VXGE_HW_BASE_BADCFG + 5,
        VXGE_HW_BADCFG_VPATH_BANDWIDTH_LIMIT      = VXGE_HW_BASE_BADCFG + 6,
        VXGE_HW_BADCFG_INTR_MODE                  = VXGE_HW_BASE_BADCFG + 7,
        VXGE_HW_BADCFG_RTS_MAC_EN                 = VXGE_HW_BASE_BADCFG + 8,
        VXGE_HW_BADCFG_VPATH_AGGR_ACK             = VXGE_HW_BASE_BADCFG + 9,
        VXGE_HW_BADCFG_VPATH_PRIORITY             = VXGE_HW_BASE_BADCFG + 10,

        VXGE_HW_EOF_TRACE_BUF                     = -1
};
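
/*
 * Usage sketch (illustrative, not part of the original driver): most of
 * the APIs declared below report success or failure through this enum,
 * with VXGE_HW_OK as the success value:
 *
 *      enum vxge_hw_status status;
 *
 *      status = vxge_hw_device_hw_info_get(pdev, bar0, &hw_info);
 *      if (status != VXGE_HW_OK)
 *              goto err_hw_info;       // error label is hypothetical
 */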

/**
 * enum vxge_hw_device_link_state - Link state enumeration.
 * @VXGE_HW_LINK_NONE: Invalid link state.
 * @VXGE_HW_LINK_DOWN: Link is down.
 * @VXGE_HW_LINK_UP: Link is up.
 *
 */
enum vxge_hw_device_link_state {
        VXGE_HW_LINK_NONE,
        VXGE_HW_LINK_DOWN,
        VXGE_HW_LINK_UP
};

/* Forward declarations */
struct vxge_vpath;
struct __vxge_hw_virtualpath;

/**
 * struct vxge_hw_ring_rxd_1 - One buffer mode RxD for ring
 *
 * One buffer mode RxD for ring structure
 */
struct vxge_hw_ring_rxd_1 {
        u64 host_control;
        u64 control_0;
#define VXGE_HW_RING_RXD_RTH_BUCKET_GET(ctrl0)          vxge_bVALn(ctrl0, 0, 7)

#define VXGE_HW_RING_RXD_LIST_OWN_ADAPTER               vxge_mBIT(7)

#define VXGE_HW_RING_RXD_FAST_PATH_ELIGIBLE_GET(ctrl0)  vxge_bVALn(ctrl0, 8, 1)

#define VXGE_HW_RING_RXD_L3_CKSUM_CORRECT_GET(ctrl0)    vxge_bVALn(ctrl0, 9, 1)

#define VXGE_HW_RING_RXD_L4_CKSUM_CORRECT_GET(ctrl0)    vxge_bVALn(ctrl0, 10, 1)

#define VXGE_HW_RING_RXD_T_CODE_GET(ctrl0)              vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_RING_RXD_T_CODE(val)                    vxge_vBIT(val, 12, 4)

#define VXGE_HW_RING_RXD_T_CODE_UNUSED          VXGE_HW_RING_T_CODE_UNUSED

#define VXGE_HW_RING_RXD_SYN_GET(ctrl0)         vxge_bVALn(ctrl0, 16, 1)

#define VXGE_HW_RING_RXD_IS_ICMP_GET(ctrl0)             vxge_bVALn(ctrl0, 17, 1)

#define VXGE_HW_RING_RXD_RTH_SPDM_HIT_GET(ctrl0)        vxge_bVALn(ctrl0, 18, 1)

#define VXGE_HW_RING_RXD_RTH_IT_HIT_GET(ctrl0)          vxge_bVALn(ctrl0, 19, 1)

#define VXGE_HW_RING_RXD_RTH_HASH_TYPE_GET(ctrl0)       vxge_bVALn(ctrl0, 20, 4)

#define VXGE_HW_RING_RXD_IS_VLAN_GET(ctrl0)             vxge_bVALn(ctrl0, 24, 1)

#define VXGE_HW_RING_RXD_ETHER_ENCAP_GET(ctrl0)         vxge_bVALn(ctrl0, 25, 2)

#define VXGE_HW_RING_RXD_FRAME_PROTO_GET(ctrl0)         vxge_bVALn(ctrl0, 27, 5)

#define VXGE_HW_RING_RXD_L3_CKSUM_GET(ctrl0)    vxge_bVALn(ctrl0, 32, 16)

#define VXGE_HW_RING_RXD_L4_CKSUM_GET(ctrl0)    vxge_bVALn(ctrl0, 48, 16)

        u64 control_1;

#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(ctrl1)      vxge_bVALn(ctrl1, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE(val) vxge_vBIT(val, 2, 14)
#define VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK            vxge_vBIT(0x3FFF, 2, 14)

#define VXGE_HW_RING_RXD_1_RTH_HASH_VAL_GET(ctrl1)    vxge_bVALn(ctrl1, 16, 32)

#define VXGE_HW_RING_RXD_VLAN_TAG_GET(ctrl1)    vxge_bVALn(ctrl1, 48, 16)

        u64 buffer0_ptr;
};
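
/*
 * Decoding sketch (illustrative, not part of the original driver):
 * pulling completion fields out of an RxD that the adapter has handed
 * back (i.e. VXGE_HW_RING_RXD_LIST_OWN_ADAPTER is clear in control_0):
 *
 *      u8 t_code  = VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
 *      u32 length = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxdp->control_1);
 */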

/**
 * struct vxge_hw_fifo_txd - Transmit Descriptor
 *
 * Transmit descriptor (TxD). A fifo descriptor list contains a configured
 * number of TxDs. For more details please refer to the Titan User Guide,
 * Section 5.4.2 "Transmit Descriptor (TxD) Format".
 */
struct vxge_hw_fifo_txd {
        u64 control_0;
#define VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER               vxge_mBIT(7)

#define VXGE_HW_FIFO_TXD_T_CODE_GET(ctrl0)              vxge_bVALn(ctrl0, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE(val)                    vxge_vBIT(val, 12, 4)
#define VXGE_HW_FIFO_TXD_T_CODE_UNUSED          VXGE_HW_FIFO_T_CODE_UNUSED

#define VXGE_HW_FIFO_TXD_GATHER_CODE(val)               vxge_vBIT(val, 22, 2)
#define VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST      VXGE_HW_FIFO_GATHER_CODE_FIRST
#define VXGE_HW_FIFO_TXD_GATHER_CODE_LAST       VXGE_HW_FIFO_GATHER_CODE_LAST

#define VXGE_HW_FIFO_TXD_LSO_EN                         vxge_mBIT(30)
#define VXGE_HW_FIFO_TXD_LSO_MSS(val)                   vxge_vBIT(val, 34, 14)
#define VXGE_HW_FIFO_TXD_BUFFER_SIZE(val)               vxge_vBIT(val, 48, 16)

        u64 control_1;
#define VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN                 vxge_mBIT(5)
#define VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN                  vxge_mBIT(6)
#define VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN                  vxge_mBIT(7)
#define VXGE_HW_FIFO_TXD_VLAN_ENABLE                    vxge_mBIT(15)

#define VXGE_HW_FIFO_TXD_VLAN_TAG(val)                  vxge_vBIT(val, 16, 16)
#define VXGE_HW_FIFO_TXD_NO_BW_LIMIT                    vxge_mBIT(43)

#define VXGE_HW_FIFO_TXD_INT_NUMBER(val)                vxge_vBIT(val, 34, 6)

#define VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST              vxge_mBIT(46)
#define VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ                 vxge_mBIT(47)

        u64 buffer_pointer;

        u64 host_control;
};
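
/*
 * Fill-in sketch (illustrative, not part of the original driver): a
 * single-buffer TxD for a frame of "len" bytes; writing the ownership
 * bit is what hands the descriptor list to the adapter. The gather code
 * shown assumes a one-TxD list (first and last codes combined):
 *
 *      txdp->buffer_pointer = virt_to_bus(iob->data);
 *      txdp->control_0 = VXGE_HW_FIFO_TXD_BUFFER_SIZE(len) |
 *                      VXGE_HW_FIFO_TXD_GATHER_CODE(
 *                              VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST |
 *                              VXGE_HW_FIFO_TXD_GATHER_CODE_LAST) |
 *                      VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
 */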

/**
 * struct vxge_hw_device_date - Date Format
 * @day: Day
 * @month: Month
 * @year: Year
 * @date: Date in string format
 *
 * Structure for returning date
 */

#define VXGE_HW_FW_STRLEN       32
struct vxge_hw_device_date {
        u32     day;
        u32     month;
        u32     year;
        char    date[VXGE_HW_FW_STRLEN];
};

struct vxge_hw_device_version {
        u32     major;
        u32     minor;
        u32     build;
        char    version[VXGE_HW_FW_STRLEN];
};
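
/*
 * Formatting sketch (illustrative, not part of the original driver):
 * the version and date structures carry both numeric fields and a
 * preformatted string, so either form can be logged:
 *
 *      DBG("fw %u.%u.%u (built %s)\n", ver->major, ver->minor,
 *          ver->build, date->date);
 */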

u64 __vxge_hw_vpath_pci_func_mode_get(
        u32 vp_id,
        struct vxge_hw_vpath_reg __iomem *vpath_reg);

/*
 * struct __vxge_hw_non_offload_db_wrapper - Non-offload Doorbell Wrapper
 * @control_0: Bits 0 to 7 - Doorbell type.
 *             Bits 8 to 31 - Reserved.
 *             Bits 32 to 39 - The highest TxD in this TxDL.
 *             Bits 40 to 47 - Reserved.
 *             Bits 48 to 55 - Reserved.
 *             Bits 56 to 63 - No snoop flags.
 * @txdl_ptr:  The starting location of the TxDL in host memory.
 *
 * Created by the host and written to the adapter via PIO to a Kernel Doorbell
 * FIFO. All non-offload doorbell wrapper fields must be written by the host as
 * part of a doorbell write. Consumed by the adapter but is not written by the
 * adapter.
 */
struct __vxge_hw_non_offload_db_wrapper {
        u64             control_0;
#define VXGE_HW_NODBW_GET_TYPE(ctrl0)                   vxge_bVALn(ctrl0, 0, 8)
#define VXGE_HW_NODBW_TYPE(val) vxge_vBIT(val, 0, 8)
#define VXGE_HW_NODBW_TYPE_NODBW                                0

#define VXGE_HW_NODBW_GET_LAST_TXD_NUMBER(ctrl0)        vxge_bVALn(ctrl0, 32, 8)
#define VXGE_HW_NODBW_LAST_TXD_NUMBER(val) vxge_vBIT(val, 32, 8)

#define VXGE_HW_NODBW_GET_NO_SNOOP(ctrl0)               vxge_bVALn(ctrl0, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP(val) vxge_vBIT(val, 56, 8)
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TXD_READ_TXD0_WRITE         0x2
#define VXGE_HW_NODBW_LIST_NO_SNOOP_TX_FRAME_DATA_READ          0x1

        u64             txdl_ptr;
};
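
/*
 * Composition sketch (illustrative, not part of the original driver):
 * a non-offload doorbell for a TxDL whose highest TxD index is 0, as is
 * the case for iPXE's one-TxD lists:
 *
 *      db->control_0 = VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
 *                      VXGE_HW_NODBW_LAST_TXD_NUMBER(0);
 *      db->txdl_ptr = virt_to_bus(txdp);
 */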

/*
 * struct __vxge_hw_fifo - Fifo.
 * @vp_id: Virtual path id
 * @tx_intr_num: Interrupt Number associated with the TX
 * @txdl: Start pointer of the txdl list of this fifo.
 *        iPXE does not support tx fragmentation, so we need
 *        only one txd in a list
 * @depth: total number of lists in this fifo
 * @hw_offset: txd index from where adapter owns the txd list
 * @sw_offset: txd index from where driver owns the txd list
 *
 */
struct __vxge_hw_fifo {
        struct vxge_hw_vpath_reg                *vp_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;
        u32                                     vp_id;
        u32                                     tx_intr_num;

        struct vxge_hw_fifo_txd         *txdl;
#define VXGE_HW_FIFO_TXD_DEPTH 128
        u16                             depth;
        u16                             hw_offset;
        u16                             sw_offset;

        struct __vxge_hw_virtualpath    *vpathh;
};

/* Structure that represents the Rx descriptor block, which holds
 * 127 Rx descriptors followed by the link words below.
 */
struct __vxge_hw_ring_block {
#define VXGE_HW_MAX_RXDS_PER_BLOCK_1            127
        struct vxge_hw_ring_rxd_1 rxd[VXGE_HW_MAX_RXDS_PER_BLOCK_1];

        u64 reserved_0;
#define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
        /* 0xFEFFFFFFFFFFFFFF marks the last RxD in this block */
        u64 reserved_1;
        /* Logical pointer to the next block */
        u64 reserved_2_pNext_RxD_block;
        /* Buff0_ptr. On a 32-bit arch the upper 32 bits should be 0 */
        u64 pNext_RxD_Blk_physical;
};

/*
 * struct __vxge_hw_ring - Ring channel.
 *
 * Note: The structure is cache line aligned to better utilize
 *       CPU cache performance.
 */
struct __vxge_hw_ring {
        struct vxge_hw_vpath_reg                *vp_reg;
        struct vxge_hw_common_reg               *common_reg;
        u32                                     vp_id;
#define VXGE_HW_RING_RXD_QWORDS_MODE_1  4
        u32                                     doorbell_cnt;
        u32                                     total_db_cnt;
#define VXGE_HW_RING_RXD_QWORD_LIMIT    16
        u64                                     rxd_qword_limit;

        struct __vxge_hw_ring_block             *rxdl;
#define VXGE_HW_RING_BUF_PER_BLOCK      9
        u16                                     buf_per_block;
        u16                                     rxd_offset;

#define VXGE_HW_RING_RX_POLL_WEIGHT     8
        u16                                     rx_poll_weight;

        struct io_buffer *iobuf[VXGE_HW_RING_BUF_PER_BLOCK + 1];
        struct __vxge_hw_virtualpath *vpathh;
};

/*
 * struct __vxge_hw_virtualpath - Virtual Path
 *
 * Virtual path structure to encapsulate the data related to a virtual path.
 * Virtual paths are allocated by the HW upon getting configuration from the
 * driver and inserted into the list of virtual paths.
 */
struct __vxge_hw_virtualpath {
        u32                             vp_id;

        u32                             vp_open;
#define VXGE_HW_VP_NOT_OPEN     0
#define VXGE_HW_VP_OPEN         1

        struct __vxge_hw_device         *hldev;
        struct vxge_hw_vpath_reg        *vp_reg;
        struct vxge_hw_vpmgmt_reg       *vpmgmt_reg;
        struct __vxge_hw_non_offload_db_wrapper *nofl_db;

        u32                             max_mtu;
        u32                             vsport_number;
        u32                             max_kdfc_db;
        u32                             max_nofl_db;

        struct __vxge_hw_ring ringh;
        struct __vxge_hw_fifo fifoh;
};
#define VXGE_HW_INFO_LEN        64
#define VXGE_HW_PMD_INFO_LEN    16
#define VXGE_MAX_PRINT_BUF_SIZE 128
/**
 * struct vxge_hw_device_hw_info - Device information
 * @host_type: Host Type
 * @func_id: Function Id
 * @vpath_mask: vpath bit mask
 * @fw_version: Firmware version
 * @fw_date: Firmware date
 * @flash_version: Flash version
 * @flash_date: Flash date
 * @mac_addrs: Mac addresses for each vpath
 * @mac_addr_masks: Mac address masks for each vpath
 *
 * Returns the vpath mask that has the bits set for each vpath allocated
 * for the driver and the first mac address for each vpath
 */
struct vxge_hw_device_hw_info {
        u32             host_type;
#define VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION                     0
#define VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION                      1
#define VXGE_HW_NO_MR_SR_VH0_FUNCTION0                          2
#define VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION                   3
#define VXGE_HW_MR_SR_VH0_INVALID_CONFIG                        4
#define VXGE_HW_SR_VH_FUNCTION0                                 5
#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION                          6
#define VXGE_HW_VH_NORMAL_FUNCTION                              7
        u64             function_mode;
#define VXGE_HW_FUNCTION_MODE_MIN                               0
#define VXGE_HW_FUNCTION_MODE_MAX                               11

#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION                   0
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION                    1
#define VXGE_HW_FUNCTION_MODE_SRIOV                             2
#define VXGE_HW_FUNCTION_MODE_MRIOV                             3
#define VXGE_HW_FUNCTION_MODE_MRIOV_8                           4
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17                 5
#define VXGE_HW_FUNCTION_MODE_SRIOV_8                           6
#define VXGE_HW_FUNCTION_MODE_SRIOV_4                           7
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2                  8
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4                  9
#define VXGE_HW_FUNCTION_MODE_MRIOV_4                           10
#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_DIRECT_IO          11

        u32             func_id;
        u64             vpath_mask;
        struct vxge_hw_device_version fw_version;
        struct vxge_hw_device_date    fw_date;
        struct vxge_hw_device_version flash_version;
        struct vxge_hw_device_date    flash_date;
        u8              serial_number[VXGE_HW_INFO_LEN];
        u8              part_number[VXGE_HW_INFO_LEN];
        u8              product_desc[VXGE_HW_INFO_LEN];
        u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
        u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
};
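
/*
 * Access sketch (illustrative, not part of the original driver):
 * copying the MAC address of the first virtual path into an iPXE
 * net_device. Indexing by 0 assumes vpath 0 belongs to this function;
 * a driver would typically scan vpath_mask for the first set bit:
 *
 *      memcpy(netdev->hw_addr, hw_info.mac_addrs[0], ETH_ALEN);
 */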

/**
 * struct __vxge_hw_device - Hal device object
 * @magic: Magic Number
 * @bar0: BAR0 virtual address.
 * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
 * @link_state: Link state
 *
 * HW device object. Represents Titan adapter
 */
struct __vxge_hw_device {
        u32                             magic;
#define VXGE_HW_DEVICE_MAGIC            0x12345678
#define VXGE_HW_DEVICE_DEAD             0xDEADDEAD
        void __iomem                    *bar0;
        struct pci_device               *pdev;
        struct net_device               *ndev;
        struct vxgedev                  *vdev;

        enum vxge_hw_device_link_state  link_state;

        u32                             host_type;
        u32                             func_id;
        u8                              titan1;
        u32                             access_rights;
#define VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH      0x1
#define VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM     0x2
#define VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM     0x4
        struct vxge_hw_legacy_reg       *legacy_reg;
        struct vxge_hw_toc_reg          *toc_reg;
        struct vxge_hw_common_reg       *common_reg;
        struct vxge_hw_mrpcim_reg       *mrpcim_reg;
        struct vxge_hw_srpcim_reg       *srpcim_reg
                                        [VXGE_HW_TITAN_SRPCIM_REG_SPACES];
        struct vxge_hw_vpmgmt_reg       *vpmgmt_reg
                                        [VXGE_HW_TITAN_VPMGMT_REG_SPACES];
        struct vxge_hw_vpath_reg        *vpath_reg
                                        [VXGE_HW_TITAN_VPATH_REG_SPACES];
        u8                              *kdfc;
        u8                              *usdc;
        struct __vxge_hw_virtualpath    virtual_path;
        u64                             vpath_assignments;
        u64                             vpaths_deployed;
        u32                             first_vp_id;
        u64                             tim_int_mask0[4];
        u32                             tim_int_mask1[4];

        struct vxge_hw_device_hw_info   hw_info;
};

#define VXGE_HW_DEVICE_LINK_STATE_SET(hldev, ls) (hldev->link_state = ls)

#define VXGE_HW_DEVICE_TIM_INT_MASK_SET(m0, m1, i) {    \
        if (i < 16) {                                   \
                m0[0] |= vxge_vBIT(0x8, (i*4), 4);      \
                m0[1] |= vxge_vBIT(0x4, (i*4), 4);      \
        }                                               \
        else {                                          \
                m1[0] = 0x80000000;                     \
                m1[1] = 0x40000000;                     \
        }                                               \
}

#define VXGE_HW_DEVICE_TIM_INT_MASK_RESET(m0, m1, i) {  \
        if (i < 16) {                                   \
                m0[0] &= ~vxge_vBIT(0x8, (i*4), 4);     \
                m0[1] &= ~vxge_vBIT(0x4, (i*4), 4);     \
        }                                               \
        else {                                          \
                m1[0] = 0;                              \
                m1[1] = 0;                              \
        }                                               \
}
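
/*
 * Usage sketch (illustrative, not part of the original driver):
 * enabling and later disabling timer interrupts for interrupt number
 * "i" in a device's mask arrays:
 *
 *      VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
 *                                      hldev->tim_int_mask1, i);
 *      VXGE_HW_DEVICE_TIM_INT_MASK_RESET(hldev->tim_int_mask0,
 *                                      hldev->tim_int_mask1, i);
 */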

/**
 * enum vxge_hw_txdl_state - Descriptor (TXDL) state.
 * @VXGE_HW_TXDL_STATE_NONE: Invalid state.
 * @VXGE_HW_TXDL_STATE_AVAIL: Descriptor is available for reservation.
 * @VXGE_HW_TXDL_STATE_POSTED: Descriptor is posted for processing by the
 * device.
 * @VXGE_HW_TXDL_STATE_FREED: Descriptor is free and can be reused for
 * filling-in and posting later.
 *
 * Titan/HW descriptor states.
 *
 */
enum vxge_hw_txdl_state {
        VXGE_HW_TXDL_STATE_NONE         = 0,
        VXGE_HW_TXDL_STATE_AVAIL        = 1,
        VXGE_HW_TXDL_STATE_POSTED       = 2,
        VXGE_HW_TXDL_STATE_FREED        = 3
};


/* fifo and ring circular buffer offset tracking apis */
static inline void __vxge_hw_desc_offset_up(u16 upper_limit,
                        u16 *offset)
{
        if (++(*offset) >= upper_limit)
                *offset = 0;
}

/* rxd offset handling apis */
static inline void vxge_hw_ring_rxd_offset_up(u16 *offset)
{
        __vxge_hw_desc_offset_up(VXGE_HW_MAX_RXDS_PER_BLOCK_1,
                        offset);
}

/* txd offset handling apis */
static inline void vxge_hw_fifo_txd_offset_up(u16 *offset)
{
        __vxge_hw_desc_offset_up(VXGE_HW_FIFO_TXD_DEPTH, offset);
}
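
/*
 * Wraparound sketch (illustrative, not part of the original driver):
 * the offset helpers implement a circular index, so advancing past the
 * end of the fifo wraps back to slot 0:
 *
 *      u16 off = VXGE_HW_FIFO_TXD_DEPTH - 1;
 *      vxge_hw_fifo_txd_offset_up(&off);       // off is now 0
 */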

/**
 * vxge_hw_ring_rxd_1b_set - Prepare 1-buffer-mode descriptor.
 * @rxdp: Descriptor pointer.
 * @iob: I/O buffer holding the single receive buffer this descriptor
 * should carry. Note that by the time vxge_hw_ring_rxd_1b_set is called,
 * the receive buffer should already be mapped to the device.
 * @size: Size of the receive buffer in @iob.
 *
 * Prepare a 1-buffer-mode Rx descriptor for posting
 * (via vxge_hw_ring_rxd_post()).
 *
 * This inline helper does not return a value and always succeeds.
 *
 */
static inline
void vxge_hw_ring_rxd_1b_set(struct vxge_hw_ring_rxd_1 *rxdp,
        struct io_buffer *iob, u32 size)
{
        rxdp->host_control = (intptr_t)(iob);
        rxdp->buffer0_ptr = virt_to_bus(iob->data);
        rxdp->control_1 &= ~VXGE_HW_RING_RXD_1_BUFFER0_SIZE_MASK;
        rxdp->control_1 |= VXGE_HW_RING_RXD_1_BUFFER0_SIZE(size);
}
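
/*
 * Usage sketch (illustrative, not part of the original driver):
 * refilling a ring slot with a freshly allocated I/O buffer before the
 * descriptor is posted back to the adapter:
 *
 *      struct io_buffer *iob = alloc_iob(rx_buf_size);
 *      if (iob)
 *              vxge_hw_ring_rxd_1b_set(rxdp, iob, iob_tailroom(iob));
 *
 * "rx_buf_size" is a placeholder; the actual receive buffer size is
 * chosen by the main driver.
 */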

enum vxge_hw_status vxge_hw_device_hw_info_get(
        struct pci_device *pdev,
        void __iomem *bar0,
        struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        struct vxge_hw_device_hw_info *hw_info);

/**
 * vxge_hw_device_link_state_get - Get link state.
 * @devh: HW device handle.
 *
 * Get link state.
 * Returns: link state.
 */
static inline
enum vxge_hw_device_link_state vxge_hw_device_link_state_get(
        struct __vxge_hw_device *devh)
{
        return devh->link_state;
}

void vxge_hw_device_terminate(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_hw_device_initialize(
        struct __vxge_hw_device **devh,
        void *bar0,
        struct pci_device *pdev,
        u8 titan1);

enum vxge_hw_status
vxge_hw_vpath_open(struct __vxge_hw_device *hldev, struct vxge_vpath *vpath);

enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);

enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_recover_from_reset(struct __vxge_hw_virtualpath *vpath);

void
vxge_hw_vpath_enable(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_mtu_set(struct __vxge_hw_virtualpath *vpath, u32 new_mtu);

void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_virtualpath *vpath);

void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);

enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
        struct vxge_hw_vpath_reg __iomem *vpath_reg);

enum vxge_hw_status
__vxge_hw_device_register_poll(
        void __iomem    *reg,
        u64 mask, u32 max_millis);

#ifndef readq
static inline u64 readq(void __iomem *addr)
{
        u64 ret = 0;
        ret = readl(addr + 4);
        ret <<= 32;
        ret |= readl(addr);

        return ret;
}
#endif

#ifndef writeq
static inline void writeq(u64 val, void __iomem *addr)
{
        writel((u32) (val), addr);
        writel((u32) (val >> 32), (addr + 4));
}
#endif
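
/*
 * Usage sketch (illustrative, not part of the original driver): the
 * readq()/writeq() fallbacks above split a 64-bit register access into
 * two 32-bit accesses, with the upper word at addr + 4:
 *
 *      u64 val = readq(&hldev->common_reg->some_64bit_reg);
 *
 * "some_64bit_reg" is a placeholder field name, not a real register.
 */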

static inline void __vxge_hw_pio_mem_write32_upper(u32 val, void __iomem *addr)
{
        writel(val, addr + 4);
}

static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
{
        writel(val, addr);
}

static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
                          u64 mask, u32 max_millis)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
        wmb();
        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
        wmb();

        status = __vxge_hw_device_register_poll(addr, mask, max_millis);
        return status;
}
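
/*
 * Ordering note (illustrative, not part of the original driver):
 * __vxge_hw_pio_mem_write64() above writes the two halves of val64
 * separated by write barriers and then delegates the wait for
 * completion to __vxge_hw_device_register_poll(), bounded by
 * max_millis, e.g.:
 *
 *      status = __vxge_hw_pio_mem_write64(val64, &vp_reg->some_reg,
 *                      mask, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 *
 * "some_reg" is a placeholder field name.
 */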

void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);

enum vxge_hw_status
__vxge_hw_vpath_pci_read(
        struct __vxge_hw_virtualpath    *vpath,
        u32                     phy_func_0,
        u32                     offset,
        u32                     *val);

enum vxge_hw_status
__vxge_hw_vpath_addr_get(
        struct vxge_hw_vpath_reg __iomem *vpath_reg,
        u8 (macaddr)[ETH_ALEN],
        u8 (macaddr_mask)[ETH_ALEN]);

u32
__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);

enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);

enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);

/**
 * vxge_debug
 * @mask: mask for the debug
 * @fmt: printf-like format string
 */
static const u16 debug_filter = VXGE_ERR;
#define vxge_debug(mask, fmt...)        do {    \
                if (debug_filter & mask)        \
                        DBG(fmt);               \
        } while (0)

#define vxge_trace()    vxge_debug(VXGE_TRACE, "%s:%d\n", __func__, __LINE__)
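
/*
 * Usage sketch (illustrative, not part of the original driver): only
 * messages whose mask is enabled in debug_filter (VXGE_ERR by default)
 * reach iPXE's DBG() stream:
 *
 *      vxge_debug(VXGE_ERR, "%s: vpath reset failed\n", __func__);
 *      vxge_trace();   // filtered out unless VXGE_TRACE is enabled
 */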

enum vxge_hw_status
vxge_hw_get_func_mode(struct __vxge_hw_device *hldev, u32 *func_mode);

enum vxge_hw_status
vxge_hw_set_fw_api(struct __vxge_hw_device *hldev,
                u64 vp_id, u32 action,
                u32 offset, u64 data0, u64 data1);

void
vxge_hw_vpath_set_zero_rx_frm_len(struct __vxge_hw_device *hldev);

#endif