/*
 * vxge-traffic.c: iPXE driver for Neterion Inc's X3100 Series 10GbE
 *              PCIe I/O Virtualized Server Adapter.
 *
 * Copyright(c) 2002-2010 Neterion Inc.
 *
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by
 * reference.  Drivers based on or derived from this code fall under
 * the GPL and must retain the authorship, copyright and license
 * notice.
 *
 */

FILE_LICENCE(GPL2_ONLY);

#include <ipxe/netdevice.h>
#include <errno.h>

#include "vxge_traffic.h"
#include "vxge_config.h"
#include "vxge_main.h"

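/*
 * Note on register access: Titan registers are 64 bits wide.  writeq()
 * and readq() access a whole register, while the
 * __vxge_hw_pio_mem_write32_upper() helper (see vxge_config.h) updates
 * only the upper 32 bits; the mask and alarm registers used below
 * appear to keep their meaningful bits in that upper word, extracted
 * with vxge_bVALn(val, 0, 32).
 */
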
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_enable(struct __vxge_hw_virtualpath *vpath)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}

	vp_reg = vpath->vp_reg;

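	/* Writing all-ones to the alarm/status registers below appears
	 * to acknowledge (write-1-to-clear) any latched events before
	 * the corresponding masks are programmed.
	 */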
	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->general_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->pci_config_errors_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->mrpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->srpcim_to_vpath_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->vpath_ppif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->srpcim_msg_to_vpath_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->vpath_pcipif_int_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->prc_alarm_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->wrdma_alarm_status);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->xgmac_vp_int_status);

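	/* Dummy read; the value is discarded, but the read forces the
	 * posted writes above out to the adapter.
	 */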
	readq(&vp_reg->vpath_general_int_status);

	/* Mask unwanted interrupts */
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->pci_config_errors_mask);

	/* Unmask the individual interrupts */
	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
		&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
		&vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
		&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

	if (vpath->hldev->first_vp_id != vpath->vp_id)
		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_mask);
	else
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT|
			VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK),
			0, 32), &vp_reg->asic_ntwk_vp_err_mask);

	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_general_int_mask);
exit:
	return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vpath: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is typically executed as part
 * of the vpath shutdown sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status
vxge_hw_vpath_intr_disable(struct __vxge_hw_virtualpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
		goto exit;
	}
	vp_reg = vpath->vp_reg;

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_general_int_mask);

	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->general_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->pci_config_errors_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->mrpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_to_vpath_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_ppif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->srpcim_msg_to_vpath_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->vpath_pcipif_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->wrdma_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->prc_alarm_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
			&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
	u64 val64;

	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
			VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);

	return;
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
	u64 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
			&hldev->common_reg->titan_mask_all_int);

	return;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	vxge_hw_vpath_intr_enable(&hldev->virtual_path);

	val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

	if (val64 != 0) {
		writeq(val64, &hldev->common_reg->tim_int_status0);

		writeq(~val64, &hldev->common_reg->tim_int_mask0);
	}

	val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

	if (val32 != 0) {
		__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

		__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
	}

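	/* Read back (the result is not used further) to flush the mask
	 * writes above through to the adapter.
	 */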
	val64 = readq(&hldev->common_reg->titan_general_int_status);

	/* We have not enabled the top level interrupt yet.
	 * This will be controlled from vxge_irq() entry api.
	 */
	return;
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
				&hldev->common_reg->tim_int_mask1);

	vxge_hw_vpath_intr_disable(&hldev->virtual_path);

	return;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdp: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring __unused,
				struct vxge_hw_ring_rxd_1 *rxdp)
{
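	/* Hand the descriptor to the adapter; the adapter clears the
	 * ownership bit once it has completed the descriptor (see the
	 * check in vxge_hw_vpath_poll_rx()).
	 */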
	rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
}

/**
 * __vxge_hw_non_offload_db_post - Post non-offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
	u64 txdl_ptr, u32 num_txds)
{
	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds),
		&fifo->nofl_db->control_0);

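	/* Order matters: ensure the doorbell control word is visible to
	 * the adapter before the TxDL pointer write that arms it.
	 */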
	wmb();

	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

	wmb();
}

/**
 * vxge_hw_fifo_free_txdl_get: fetch next available txd in the fifo
 *
 * @fifo: tx channel handle
 */
struct vxge_hw_fifo_txd *
	vxge_hw_fifo_free_txdl_get(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_fifo_txd *txdp;

	txdp = fifo->txdl + fifo->sw_offset;
	if (txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER) {
		vxge_debug(VXGE_ERR, "%s:%d, error: txd(%d) owned by hw\n",
				__func__, __LINE__, fifo->sw_offset);
		return NULL;
	}

	return txdp;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Descriptor handle.
 * @iob: data buffer.
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
			struct vxge_hw_fifo_txd *txdp,
			struct io_buffer *iob)
{
	txdp->control_0 = VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST);
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(iob_len(iob));

	txdp->control_1 = VXGE_HW_FIFO_TXD_INT_NUMBER(fifo->tx_intr_num);
	txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;

	txdp->host_control = (intptr_t)iob;
	txdp->buffer_pointer = virt_to_bus(iob->data);
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdp: Tx Descriptor
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with
 * the Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo,
			struct vxge_hw_fifo_txd *txdp)
{
	txdp->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	__vxge_hw_non_offload_db_post(fifo, (u64) virt_to_bus(txdp), 0);

	vxge_hw_fifo_txd_offset_up(&fifo->sw_offset);
}

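/*
 * A minimal transmit sketch using the three helpers above (assuming an
 * already-filled iPXE io_buffer "iob"; error handling elided):
 *
 *	struct vxge_hw_fifo_txd *txdp;
 *
 *	txdp = vxge_hw_fifo_free_txdl_get(fifo);
 *	if (!txdp)
 *		return -ENOBUFS;
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdp, iob);
 *	vxge_hw_fifo_txdl_post(fifo, txdp);
 */
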
/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status __vxge_hw_vpath_alarm_process(
			struct __vxge_hw_virtualpath *vpath)
{
	u64 val64;
	u64 alarm_status;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_device *hldev = NULL;
	struct vxge_hw_vpath_reg __iomem *vp_reg;

	hldev = vpath->hldev;
	vp_reg = vpath->vp_reg;
	alarm_status = readq(&vp_reg->vpath_general_int_status);

	if (alarm_status == VXGE_HW_ALL_FOXES) {

		vxge_debug(VXGE_ERR, "%s: %s:%d, slot freeze error\n",
			hldev->ndev->name, __func__, __LINE__);
		status = VXGE_HW_ERR_SLOT_FREEZE;
		goto out;
	}

	if (alarm_status & ~(
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {

		vxge_debug(VXGE_ERR, "%s: %s:%d, Unknown vpath alarm\n",
			hldev->ndev->name, __func__, __LINE__);
		status = VXGE_HW_FAIL;
		goto out;
	}

	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

		val64 = readq(&vp_reg->xgmac_vp_int_status);

		if (val64 &
		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
			    (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
				&& (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
			))) {
				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
					&vp_reg->asic_ntwk_vp_err_mask);

				netdev_link_down(hldev->ndev);
				vxge_debug(VXGE_INTR, "%s: %s:%d link down\n",
					hldev->ndev->name, __func__, __LINE__);
			}

			if (((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
			    (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
			    ((val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
				&& (!(val64 &
				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
			))) {
				writeq(VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
					&vp_reg->asic_ntwk_vp_err_mask);

				netdev_link_up(hldev->ndev);
				vxge_debug(VXGE_INTR, "%s: %s:%d link up\n",
					hldev->ndev->name, __func__, __LINE__);
			}

			writeq(VXGE_HW_INTR_MASK_ALL,
				&vp_reg->asic_ntwk_vp_err_reg);
		}
	} else {
		vxge_debug(VXGE_INFO, "%s: %s:%d unhandled alarm %llx\n",
				hldev->ndev->name, __func__, __LINE__,
				alarm_status);
	}
out:
	return status;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
			(hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status0);
	}

	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
			&hldev->common_reg->tim_int_status1);
	}

	return;
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 *
 * The function checks whether the interrupt (which may be shared) was
 * raised by the device, acknowledges any traffic interrupts, and
 * processes any pending vpath alarms.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note
 * that in this case the device interrupts remain enabled); otherwise a
 * status reflecting any alarm processing that took place.
 */
enum vxge_hw_status
vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev)
{
	u64 val64;
	u64 adapter_status;
	u64 vpath_mask;
	enum vxge_hw_status ret = VXGE_HW_OK;

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	if (!val64) {
		ret = VXGE_HW_ERR_WRONG_IRQ;
		goto exit;
	}

	if (val64 == VXGE_HW_ALL_FOXES) {

		adapter_status = readq(&hldev->common_reg->adapter_status);

		if (adapter_status == VXGE_HW_ALL_FOXES) {

			vxge_debug(VXGE_ERR, "%s: %s:%d critical error "
				"occurred\n", hldev->ndev->name,
				__func__, __LINE__);
			ret = VXGE_HW_ERR_SLOT_FREEZE;
			goto exit;
		}
	}

	vpath_mask = hldev->vpaths_deployed >>
				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
	if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
				vpath_mask))
		vxge_hw_device_clear_tx_rx(hldev);

	if (val64 & VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)
		ret = __vxge_hw_vpath_alarm_process(&hldev->virtual_path);

exit:
	return ret;
}

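/*
 * A minimal poll-path sketch (the names "ring" and "fifo" below are
 * placeholders for the driver's ring and fifo objects, not fields
 * defined in this file):
 *
 *	if (vxge_hw_device_begin_irq(hldev) == VXGE_HW_OK) {
 *		vxge_hw_vpath_poll_rx(ring);
 *		vxge_hw_vpath_poll_tx(fifo);
 *	}
 */
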
/**
 * vxge_hw_vpath_doorbell_rx - Indicates to hw the qwords of receive
 * descriptors posted.
 * @ring: Handle to the ring object used for receive
 *
 * The function writes the number of qwords of rxds posted during
 * replenishment. Since the function is called frequently, a flush is not
 * required to post the write transaction. At the very least, the previous
 * write will be flushed once the subsequent write is made.
 *
 * Returns: None.
 */
void vxge_hw_vpath_doorbell_rx(struct __vxge_hw_ring *ring)
{
	u32 rxds_qw_per_block = VXGE_HW_MAX_RXDS_PER_BLOCK_1 *
		VXGE_HW_RING_RXD_QWORDS_MODE_1;

	ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

	ring->total_db_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

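	/* Once a whole block's worth of RxDs has been counted, credit
	 * an extra descriptor's qwords so the doorbell total keeps pace
	 * with the on-adapter block layout (this is the apparent intent
	 * of the adjustment below).
	 */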
	if (ring->total_db_cnt >= rxds_qw_per_block) {
		/* For each block add 4 more qwords */
		ring->doorbell_cnt += VXGE_HW_RING_RXD_QWORDS_MODE_1;

		/* Reset total count */
		ring->total_db_cnt -= rxds_qw_per_block;
	}

	if (ring->doorbell_cnt >= ring->rxd_qword_limit) {
		wmb();
		writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(
			ring->doorbell_cnt),
			&ring->vp_reg->prc_rxd_doorbell);
		ring->doorbell_cnt = 0;
	}
}

#define ETH_FCS_LEN     4

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx ring for completed descriptors.
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_ring_rxd_1 *rxd;
	unsigned int len;
	enum vxge_hw_ring_tcode tcode;
	struct io_buffer *rx_iob, *iobuf = NULL;
	u16 poll_count = 0;

	hldev = ring->vpathh->hldev;

	do {
		rxd = &ring->rxdl->rxd[ring->rxd_offset];
		tcode = VXGE_HW_RING_RXD_T_CODE_GET(rxd->control_0);

		/* if tcode is VXGE_HW_RING_T_CODE_FRM_DROP, it is
		 * possible that the ownership bit is still set to
		 * the adapter
		 */
		if ((rxd->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)
			&& (tcode == VXGE_HW_RING_T_CODE_OK)) {

			status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
			goto err0;
		}

		vxge_debug(VXGE_INFO, "%s: rx frame received at offset %d\n",
			hldev->ndev->name, ring->rxd_offset);

		if (tcode != VXGE_HW_RING_T_CODE_OK) {
			netdev_rx_err(hldev->ndev, NULL, -EINVAL);
			vxge_debug(VXGE_ERR, "%s:%d, rx error tcode %d\n",
				__func__, __LINE__, tcode);
			status = VXGE_HW_FAIL;
			goto err1;
		}

		iobuf = (struct io_buffer *)(intptr_t)rxd->host_control;

		len = VXGE_HW_RING_RXD_1_BUFFER0_SIZE_GET(rxd->control_1);
		len -= ETH_FCS_LEN;

		rx_iob = alloc_iob(len);
		if (!rx_iob) {
			netdev_rx_err(hldev->ndev, NULL, -ENOMEM);
			vxge_debug(VXGE_ERR, "%s:%d, alloc_iob error\n",
				__func__, __LINE__);
			status = VXGE_HW_ERR_OUT_OF_MEMORY;
			goto err1;
		}

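		/* Copy the frame into the fresh buffer and hand it up;
		 * the original DMA buffer stays with the ring and is
		 * reposted below.
		 */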
		memcpy(iob_put(rx_iob, len), iobuf->data, len);
		/* Add this packet to the receive queue. */
		netdev_rx(hldev->ndev, rx_iob);

err1:
		/* repost the rxd */
		rxd->control_0 = rxd->control_1 = 0;
		vxge_hw_ring_rxd_1b_set(rxd, iobuf,
				VXGE_LL_MAX_FRAME_SIZE(hldev->vdev));
		vxge_hw_ring_rxd_post(ring, rxd);

		/* repost the qword count for doorbell */
		vxge_hw_vpath_doorbell_rx(ring);

		/* increment the descriptor offset */
		vxge_hw_ring_rxd_offset_up(&ring->rxd_offset);

	} while (++poll_count < ring->rx_poll_weight);
err0:
	return status;
}

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 *
 * The function polls the Tx fifo for completed descriptors and calls
 * the driver via the supplied completion callback.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_hw_fifo_txd *txdp;

	txdp = fifo->txdl + fifo->hw_offset;
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)
		&& (txdp->host_control)) {

		vxge_xmit_compl(fifo, txdp,
			VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0));

		vxge_hw_fifo_txd_offset_up(&fifo->hw_offset);
	}

	return status;
}