/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice.  This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
 *                 Virtualized Server Adapter.
 * Copyright(c) 2002-2010 Exar Corp.
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/prefetch.h>

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-main.h"

/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Enable vpath interrupts. The function is to be executed last in the
 * vpath initialization sequence.
 *
 * See also: vxge_hw_vpath_intr_disable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }

        vp_reg = vpath->vp_reg;

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_status);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_reg);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_status);

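        /*
         * Reading the general interrupt status back appears to serve only
         * as a PIO flush for the clear-on-write operations above; the
         * value itself is discarded.
         */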
        val64 = readq(&vp_reg->vpath_general_int_status);

        /* Mask unwanted interrupts */

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        /* Unmask the individual interrupts */

        writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
                VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
                &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
                VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
                &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
                &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
        __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);

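        /*
         * Only the first vpath appears to listen for network (link)
         * alarms; the other vpaths keep them fully masked so that a
         * link fault/reaffirmation is reported once per device.
         */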
        if (vpath->hldev->first_vp_id != vpath->vp_id)
                __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);
        else
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
                VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
                &vp_reg->asic_ntwk_vp_err_mask);

        __vxge_hw_pio_mem_write32_upper(0,
                &vp_reg->vpath_general_int_mask);
exit:
        return status;
}

/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Disable vpath interrupts. The function is to be executed last in the
 * vpath teardown sequence.
 *
 * See also: vxge_hw_vpath_intr_enable()
 */
enum vxge_hw_status vxge_hw_vpath_intr_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;

        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
                status = VXGE_HW_ERR_VPATH_NOT_OPEN;
                goto exit;
        }
        vp_reg = vpath->vp_reg;

        __vxge_hw_pio_mem_write32_upper(
                (u32)VXGE_HW_INTR_MASK_ALL,
                &vp_reg->vpath_general_int_mask);

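        /*
         * Note: this TIM clear-interrupt value is computed below but
         * never written back to a register in this function.
         */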
        val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));

        writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->general_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->pci_config_errors_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->mrpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_to_vpath_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_ppif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->srpcim_msg_to_vpath_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->vpath_pcipif_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->wrdma_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->prc_alarm_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->xgmac_vp_int_mask);

        __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
                        &vp_reg->asic_ntwk_vp_err_mask);

exit:
        return status;
}

void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
        struct vxge_hw_vpath_reg __iomem *vp_reg;
        struct vxge_hw_vp_config *config;
        u64 val64;

        if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
                return;

        vp_reg = fifo->vp_reg;
        config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

        if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
                config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
                val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
                val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
                fifo->tim_tti_cfg1_saved = val64;
                writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
        }
}

void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
        u64 val64 = ring->tim_rti_cfg1_saved;

        val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
        ring->tim_rti_cfg1_saved = val64;
        writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}

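/*
 * The rtimer value is kept in microseconds; the TIM RTIMER field is
 * programmed in units of (presumably) 272 ns hardware ticks, hence the
 * (usec * 1000) / 272 conversion below and in the RX variant.
 */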
void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
        u64 val64 = fifo->tim_tti_cfg3_saved;
        u64 timer = (fifo->rtimer * 1000) / 272;

        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
        if (timer)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
                        VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

        writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
        /* tti_cfg3_saved is not updated again because it is
         * initialized at one place only - init time.
         */
}

void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
        u64 val64 = ring->tim_rti_cfg3_saved;
        u64 timer = (ring->rtimer * 1000) / 272;

        val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
        if (timer)
                val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
                        VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

        writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
        /* rti_cfg3_saved is not updated again because it is
         * initialized at one place only - init time.
         */
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{
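        /*
         * The MSIX vectors appear to be spread across four 64-bit mask
         * registers: msix_id % 4 picks the register and msix_id >> 2
         * picks the bit within it (vxge_mBIT numbers bits from the MSB).
         */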
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_channel_msix_clear - Unmask the MSIX vector in one-shot mode.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks (re-arms) the msix interrupt for the given msix_id
 * if configured in MSIX oneshot mode
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
        __vxge_hw_pio_mem_write32_upper(
                (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *              with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{
        if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
           (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
           (intr_mode != VXGE_HW_INTR_MODE_DEF))
                intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

        hldev->config.intr_mode = intr_mode;
        return intr_mode;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed last in the
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
        u32 i;
        u64 val64;
        u32 val32;

        vxge_hw_device_mask_all(hldev);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_enable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
                val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

                if (val64 != 0) {
                        writeq(val64, &hldev->common_reg->tim_int_status0);

                        writeq(~val64, &hldev->common_reg->tim_int_mask0);
                }

                val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

                if (val32 != 0) {
                        __vxge_hw_pio_mem_write32_upper(val32,
                                        &hldev->common_reg->tim_int_status1);

                        __vxge_hw_pio_mem_write32_upper(~val32,
                                        &hldev->common_reg->tim_int_mask1);
                }
        }

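        /* Read back once so the mask/status writes above are posted
         * before unmasking. */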
        val64 = readq(&hldev->common_reg->titan_general_int_status);

        vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
        u32 i;

        vxge_hw_device_mask_all(hldev);

        /* mask all the tim interrupts */
        writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
        __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
                &hldev->common_reg->tim_int_mask1);

        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                        continue;

                vxge_hw_vpath_intr_disable(
                        VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
        }
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
 * @hldev: HW device handle.
 *
 * Mask all device interrupts.
 *
 * See also: vxge_hw_device_unmask_all()
 */
void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
{
        u64 val64;

        val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
                VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                                &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 * @hldev: HW device handle.
 *
 * Unmask all device interrupts.
 *
 * See also: vxge_hw_device_mask_all()
 */
void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
{
        u64 val64 = 0;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
                val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;

        __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
                        &hldev->common_reg->titan_mask_all_int);
}

/**
 * vxge_hw_device_flush_io - Flush io writes.
 * @hldev: HW device handle.
 *
 * The function performs a read operation to flush io writes.
 *
 * Returns: void
 */
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
        u32 val32;

        val32 = readl(&hldev->common_reg->titan_general_int_status);
}

/**
 * __vxge_hw_device_handle_error - Handle error
 * @hldev: HW device
 * @vp_id: Vpath Id
 * @type: Error type. Please see enum vxge_hw_event{}
 *
 * Handle error.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
                              enum vxge_hw_event type)
{
        switch (type) {
        case VXGE_HW_EVENT_UNKNOWN:
                break;
        case VXGE_HW_EVENT_RESET_START:
        case VXGE_HW_EVENT_RESET_COMPLETE:
        case VXGE_HW_EVENT_LINK_DOWN:
        case VXGE_HW_EVENT_LINK_UP:
                goto out;
        case VXGE_HW_EVENT_ALARM_CLEARED:
                goto out;
        case VXGE_HW_EVENT_ECCERR:
        case VXGE_HW_EVENT_MRPCIM_ECCERR:
                goto out;
        case VXGE_HW_EVENT_FIFO_ERR:
        case VXGE_HW_EVENT_VPATH_ERR:
        case VXGE_HW_EVENT_CRITICAL_ERR:
        case VXGE_HW_EVENT_SERR:
                break;
        case VXGE_HW_EVENT_SRPCIM_SERR:
        case VXGE_HW_EVENT_MRPCIM_SERR:
                goto out;
        case VXGE_HW_EVENT_SLOT_FREEZE:
                break;
        default:
                vxge_assert(0);
                goto out;
        }

        /* notify driver */
        if (hldev->uld_callbacks->crit_err)
                hldev->uld_callbacks->crit_err(hldev,
                        type, vp_id);
out:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_down_ind
 * @hldev: HW device handle.
 *
 * Link down indication handler. The function is invoked by HW when
 * Titan indicates that the link is down.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already down, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_DOWN)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_DOWN;

        /* notify driver */
        if (hldev->uld_callbacks->link_down)
                hldev->uld_callbacks->link_down(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_device_handle_link_up_ind
 * @hldev: HW device handle.
 *
 * Link up indication handler. The function is invoked by HW when
 * Titan indicates that the link is up for a programmable amount of time.
 */
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
        /*
         * If the link state is already up, return.
         */
        if (hldev->link_state == VXGE_HW_LINK_UP)
                goto exit;

        hldev->link_state = VXGE_HW_LINK_UP;

        /* notify driver */
        if (hldev->uld_callbacks->link_up)
                hldev->uld_callbacks->link_up(hldev);
exit:
        return VXGE_HW_OK;
}

/*
 * __vxge_hw_vpath_alarm_process - Process Alarms.
 * @vpath: Virtual Path.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
                              u32 skip_alarms)
{
        u64 val64;
        u64 alarm_status;
        u64 pic_status;
        struct __vxge_hw_device *hldev = NULL;
        enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
        u64 mask64;
        struct vxge_hw_vpath_stats_sw_info *sw_stats;
        struct vxge_hw_vpath_reg __iomem *vp_reg;

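        /*
         * alarm_event is only ever escalated: VXGE_HW_SET_LEVEL() appears
         * to keep the higher-severity of the current and new event codes,
         * so the most serious condition observed wins at the end.
         */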
        if (vpath == NULL) {
                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
                        alarm_event);
                goto out2;
        }

        hldev = vpath->hldev;
        vp_reg = vpath->vp_reg;
        alarm_status = readq(&vp_reg->vpath_general_int_status);

        if (alarm_status == VXGE_HW_ALL_FOXES) {
                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
                        alarm_event);
                goto out;
        }

        sw_stats = vpath->sw_stats;

        if (alarm_status & ~(
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
                VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
                sw_stats->error_stats.unknown_alarms++;

                alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
                        alarm_event);
                goto out;
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {

                val64 = readq(&vp_reg->xgmac_vp_int_status);

                if (val64 &
                VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {

                        val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);

                        if (((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
                             (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
                            ((val64 &
                             VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
                             (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
                                     ))) {
                                sw_stats->error_stats.network_sustained_fault++;

                                writeq(
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                __vxge_hw_device_handle_link_down_ind(hldev);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_LINK_DOWN, alarm_event);
                        }

                        if (((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
                             (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
                            ((val64 &
                              VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
                             (!(val64 &
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
                                     ))) {

                                sw_stats->error_stats.network_sustained_ok++;

                                writeq(
                                VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
                                        &vp_reg->asic_ntwk_vp_err_mask);

                                __vxge_hw_device_handle_link_up_ind(hldev);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_LINK_UP, alarm_event);
                        }

                        writeq(VXGE_HW_INTR_MASK_ALL,
                                &vp_reg->asic_ntwk_vp_err_reg);

                        alarm_event = VXGE_HW_SET_LEVEL(
                                VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);

                        if (skip_alarms)
                                return VXGE_HW_OK;
                }
        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {

                pic_status = readq(&vp_reg->vpath_ppif_int_status);

                if (pic_status &
                    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {

                        val64 = readq(&vp_reg->general_errors_reg);
                        mask64 = readq(&vp_reg->general_errors_mask);

                        if ((val64 &
                                VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
                                ~mask64) {
                                sw_stats->error_stats.ini_serr_det++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_SERR, alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
                                ~mask64) {
                                sw_stats->error_stats.dblgen_fifo0_overflow++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR, alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
                                ~mask64)
                                sw_stats->error_stats.statsb_pif_chain_error++;

                        if ((val64 &
                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
                                ~mask64)
                                sw_stats->error_stats.statsb_drop_timeout++;

                        if ((val64 &
                                VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
                                ~mask64)
                                sw_stats->error_stats.target_illegal_access++;

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->general_errors_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_ALARM_CLEARED,
                                        alarm_event);
                        }
                }

                if (pic_status &
                    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {

                        val64 = readq(&vp_reg->kdfcctl_errors_reg);
                        mask64 = readq(&vp_reg->kdfcctl_errors_mask);

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_overwrite++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_poison++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if ((val64 &
                            VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
                                ~mask64) {
                                sw_stats->error_stats.kdfcctl_fifo0_dma_error++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_FIFO_ERR,
                                        alarm_event);
                        }

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->kdfcctl_errors_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_ALARM_CLEARED,
                                        alarm_event);
                        }
                }

        }

        if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {

                val64 = readq(&vp_reg->wrdma_alarm_status);

                if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {

                        val64 = readq(&vp_reg->prc_alarm_reg);
                        mask64 = readq(&vp_reg->prc_alarm_mask);

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
                                ~mask64)
                                sw_stats->error_stats.prc_ring_bumps++;

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
                                ~mask64) {
                                sw_stats->error_stats.prc_rxdcm_sc_err++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_VPATH_ERR,
                                        alarm_event);
                        }

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
                                & ~mask64) {
                                sw_stats->error_stats.prc_rxdcm_sc_abort++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                                VXGE_HW_EVENT_VPATH_ERR,
                                                alarm_event);
                        }

                        if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
                                 & ~mask64) {
                                sw_stats->error_stats.prc_quanta_size_err++;

                                alarm_event = VXGE_HW_SET_LEVEL(
                                        VXGE_HW_EVENT_VPATH_ERR,
                                        alarm_event);
                        }

                        if (!skip_alarms) {
                                writeq(VXGE_HW_INTR_MASK_ALL,
                                        &vp_reg->prc_alarm_reg);
                                alarm_event = VXGE_HW_SET_LEVEL(
                                                VXGE_HW_EVENT_ALARM_CLEARED,
                                                alarm_event);
                        }
                }
        }
out:
        hldev->stats.sw_dev_err_stats.vpath_alarms++;
out2:
        if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
                (alarm_event == VXGE_HW_EVENT_UNKNOWN))
                return VXGE_HW_OK;

        __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);

        if (alarm_event == VXGE_HW_EVENT_SERR)
                return VXGE_HW_ERR_CRITICAL;

        return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
                VXGE_HW_ERR_SLOT_FREEZE :
                (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
                VXGE_HW_ERR_VPATH;
}

/**
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarms
 * @reason: "Reason" for the interrupt, the value of Titan's
 *      general_int_status register.
 *
 * The function performs two actions: it first checks whether the interrupt
 * was raised by the device (relevant on shared IRQ lines), and then it
 * masks the device interrupts.
 *
 * Note:
 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 *
 * Returns: 0, if the interrupt is not "ours" (note that in this case the
 * device remains enabled).
 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
 * status.
 */
enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
                                             u32 skip_alarms, u64 *reason)
{
        u32 i;
        u64 val64;
        u64 adapter_status;
        u64 vpath_mask;
        enum vxge_hw_status ret = VXGE_HW_OK;

        val64 = readq(&hldev->common_reg->titan_general_int_status);

        if (unlikely(!val64)) {
                /* not Titan interrupt */
                *reason = 0;
                ret = VXGE_HW_ERR_WRONG_IRQ;
                goto exit;
        }

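        /*
         * All-ones from an MMIO read usually means the PCI read itself
         * failed (master abort); treat it as a candidate slot freeze and
         * confirm against the adapter status register.
         */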
        if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {

                adapter_status = readq(&hldev->common_reg->adapter_status);

                if (adapter_status == VXGE_HW_ALL_FOXES) {

                        __vxge_hw_device_handle_error(hldev,
                                NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
                        *reason = 0;
                        ret = VXGE_HW_ERR_SLOT_FREEZE;
                        goto exit;
                }
        }

        hldev->stats.sw_dev_info_stats.total_intr_cnt++;

        *reason = val64;

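        /*
         * vpaths_deployed appears to be kept MSB-first (vxge_mBIT
         * numbering); the shift converts it into the LSB-based per-vpath
         * mask that the VPATH_TRAFFIC_INT field expects.
         */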
        vpath_mask = hldev->vpaths_deployed >>
                                (64 - VXGE_HW_MAX_VIRTUAL_PATHS);

        if (val64 &
            VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
                hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;

                return VXGE_HW_OK;
        }

        hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;

        if (unlikely(val64 &
                        VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {

                enum vxge_hw_status error_level = VXGE_HW_OK;

                hldev->stats.sw_dev_err_stats.vpath_alarms++;

                for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

                        if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
                                continue;

                        ret = __vxge_hw_vpath_alarm_process(
                                &hldev->virtual_paths[i], skip_alarms);

                        error_level = VXGE_HW_SET_LEVEL(ret, error_level);

                        if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
                                (ret == VXGE_HW_ERR_SLOT_FREEZE)))
                                break;
                }

                ret = error_level;
        }
exit:
        return ret;
}

/**
 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 * condition that has caused the Tx and Rx interrupt.
 * @hldev: HW device.
 *
 * Acknowledge (that is, clear) the condition that has caused
 * the Tx and Rx interrupt.
 * See also: vxge_hw_device_begin_irq(),
 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 */
void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
{
        if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status0);
        }

        if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                                (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                                 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
                                &hldev->common_reg->tim_int_status1);
        }
}

/*
 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 * @channel: Channel
 * @dtrh: Buffer to return the DTR pointer
 *
 * Allocates a dtr from the reserve array. If the reserve array is empty,
 * it swaps the reserve and free arrays.
 *
 */
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
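        /*
         * Invariant (as the code reads): reserve_arr[reserve_top ..
         * reserve_ptr - 1] holds allocatable descriptors, while
         * free_arr[free_ptr .. length - 1] accumulates descriptors
         * returned by vxge_hw_channel_dtr_free().
         */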
        if (channel->reserve_ptr - channel->reserve_top > 0) {
_alloc_after_swap:
                *dtrh = channel->reserve_arr[--channel->reserve_ptr];

                return VXGE_HW_OK;
        }

        /* switch between empty and full arrays */

        /* the idea behind such a design is that by having the free and
         * reserve arrays separated we basically separate the irq and
         * non-irq parts, i.e. no additional locking is needed when we
         * free a resource */

        if (channel->length - channel->free_ptr > 0) {
                swap(channel->reserve_arr, channel->free_arr);
                channel->reserve_ptr = channel->length;
                channel->reserve_top = channel->free_ptr;
                channel->free_ptr = channel->length;

                channel->stats->reserve_free_swaps_cnt++;

                goto _alloc_after_swap;
        }

        channel->stats->full_cnt++;

        *dtrh = NULL;
        return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
}

/*
 * vxge_hw_channel_dtr_post - Post a dtr to the channel
 * @channel: Channel
 * @dtrh: DTR pointer
 *
 * Posts a dtr to the work array.
 *
 */
static void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
{
        vxge_assert(channel->work_arr[channel->post_index] == NULL);

        channel->work_arr[channel->post_index++] = dtrh;

        /* wrap-around */
        if (channel->post_index == channel->length)
                channel->post_index = 0;
}

/*
 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
 * @channel: Channel
 * @dtrh: Buffer to return the next completed DTR pointer
 *
 * Returns the next completed dtr without removing it from the work array
 *
 */
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
{
        vxge_assert(channel->compl_index < channel->length);

        *dtrh = channel->work_arr[channel->compl_index];
        prefetch(*dtrh);
}

/*
 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
 * @channel: Channel handle
 *
 * Removes the next completed dtr from the work array
 *
 */
void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
{
        channel->work_arr[channel->compl_index] = NULL;

        /* wrap-around */
        if (++channel->compl_index == channel->length)
                channel->compl_index = 0;

        channel->stats->total_compl_cnt++;
}

/*
 * vxge_hw_channel_dtr_free - Frees a dtr
 * @channel: Channel handle
 * @dtrh: DTR pointer
 *
 * Returns the dtr to the free array
 *
 */
void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
{
        channel->free_arr[--channel->free_ptr] = dtrh;
}

/*
 * vxge_hw_channel_dtr_count
 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
 *
 * Retrieve the number of DTRs available. This function cannot be called
 * from the data path. ring_initial_replenishi() is the only user.
 */
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
{
        return (channel->reserve_ptr - channel->reserve_top) +
                (channel->length - channel->free_ptr);
}

/**
 * vxge_hw_ring_rxd_reserve     - Reserve ring descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
 * with a valid handle.
 *
 * Reserve Rx descriptor for the subsequent filling-in by the driver
 * and posting on the corresponding ring via vxge_hw_ring_rxd_post().
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 */
enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
        void **rxdh)
{
        enum vxge_hw_status status;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        status = vxge_hw_channel_dtr_alloc(channel, rxdh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_ring_rxd_1 *rxdp =
                        (struct vxge_hw_ring_rxd_1 *)*rxdh;

                rxdp->control_0 = rxdp->control_1 = 0;
        }

        return status;
}

/**
 * vxge_hw_ring_rxd_free - Free descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_ring_rxd_reserve);
 *
 * - posted     (vxge_hw_ring_rxd_post);
 *
 * - completed (vxge_hw_ring_rxd_next_completed);
 *
 * - and recycled again (vxge_hw_ring_rxd_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_free(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * This routine prepares an rxd and posts it
 */
void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        vxge_hw_channel_dtr_post(channel, rxdh);
}

/**
 * vxge_hw_ring_rxd_post_post - Process rxd after post.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post
 */
void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 *
 * Post descriptor on the ring.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
{
        struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
        struct __vxge_hw_channel *channel;

        channel = &ring->channel;

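        /*
         * The write barrier makes sure the rest of the descriptor is
         * visible in memory before ownership is handed to the adapter
         * via the OWN bit.
         */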
        wmb();
        rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;

        vxge_hw_channel_dtr_post(channel, rxdh);

        if (ring->stats->common_stats.usage_cnt > 0)
                ring->stats->common_stats.usage_cnt--;
}

/**
 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 *
 * Processes rxd after post with memory barrier.
 */
void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
{
        wmb();
        vxge_hw_ring_rxd_post_post(ring, rxdh);
}

/**
 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle. Returned by HW.
 * @t_code:     Transfer code, as per Titan User Guide,
 *       Receive Descriptor Format. Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_ring_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_ring_rxd_next_completed either immediately from inside the
 * ring callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to fill-in receive buffer(s)
 * of the descriptor.
 * For instance, parity error detected during the data transfer.
 * In this case Titan will complete the descriptor and indicate
 * for the host that the received data is not to be used.
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: vxge_hw_ring_callback_f{},
 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
 */
enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
        struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
{
        struct __vxge_hw_channel *channel;
        struct vxge_hw_ring_rxd_1 *rxdp;
        enum vxge_hw_status status = VXGE_HW_OK;
        u64 control_0, own;

        channel = &ring->channel;

        vxge_hw_channel_dtr_try_complete(channel, rxdh);

        rxdp = *rxdh;
        if (rxdp == NULL) {
                status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
                goto exit;
        }

        control_0 = rxdp->control_0;
        own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
        *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);

        /* check whether it is not the end */
        if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {

                vxge_assert(rxdp->host_control != 0);

                ++ring->cmpl_cnt;
                vxge_hw_channel_dtr_complete(channel);

                vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);

                ring->stats->common_stats.usage_cnt++;
                if (ring->stats->common_stats.usage_max <
                                ring->stats->common_stats.usage_cnt)
                        ring->stats->common_stats.usage_max =
                                ring->stats->common_stats.usage_cnt;

                status = VXGE_HW_OK;
                goto exit;
        }

        /* reset it. since we don't want to return
         * garbage to the driver */
        *rxdh = NULL;
        status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
        return status;
}

/**
 * vxge_hw_ring_handle_tcode - Handle transfer code.
 * @ring: Handle to the ring object used for receive
 * @rxdh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK                   - for success.
 * VXGE_HW_ERR_CRITICAL         - when it encounters a critical error.
 */
enum vxge_hw_status vxge_hw_ring_handle_tcode(
        struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status = VXGE_HW_OK;

        channel = &ring->channel;

        /* If the t_code is not supported and if the
         * t_code is other than 0x5 (unparseable packet
         * such as unknown IPv6 header), drop it !!!
         */

        if (t_code == VXGE_HW_RING_T_CODE_OK ||
                t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
                status = VXGE_HW_OK;
                goto exit;
        }

        if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
                status = VXGE_HW_ERR_INVALID_TCODE;
                goto exit;
        }

        ring->stats->rxd_t_code_err_cnt[t_code]++;
exit:
        return status;
}

/**
 * __vxge_hw_non_offload_db_post - Post non offload doorbell
 *
 * @fifo: fifo handle
 * @txdl_ptr: The starting location of the TxDL in host memory
 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
 * @no_snoop: No snoop flags
 *
 * This function posts a non-offload doorbell to the doorbell FIFO
 *
 */
static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
        u64 txdl_ptr, u32 num_txds, u32 no_snoop)
{
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

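        /*
         * Doorbell sequence: the control word goes out first, then the
         * TxDL pointer; the mmiowb() calls keep the two MMIO writes
         * ordered on platforms where writeq alone does not guarantee it.
         */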
        writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
                VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
                VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
                &fifo->nofl_db->control_0);

        mmiowb();

        writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);

        mmiowb();
}

/**
 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
 * the fifo
 * @fifoh: Handle to the fifo object used for non offload send
 */
u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
{
        return vxge_hw_channel_dtr_count(&fifoh->channel);
}

/**
 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
 *        with a valid handle.
 * @txdl_priv: Buffer to return the pointer to per txdl space
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the driver
 * and posting on the corresponding fifo
 * via vxge_hw_fifo_txdl_post().
 *
 * Note: it is the responsibility of the driver to reserve multiple
 * descriptors for lengthy (e.g., LSO) transmit operation. A single fifo
 * descriptor carries up to the configured number (fifo.max_frags) of
 * contiguous buffers.
 *
 * Returns: VXGE_HW_OK - success;
 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
        struct __vxge_hw_fifo *fifo,
        void **txdlh, void **txdl_priv)
{
        struct __vxge_hw_channel *channel;
        enum vxge_hw_status status;
        int i;

        channel = &fifo->channel;

        status = vxge_hw_channel_dtr_alloc(channel, txdlh);

        if (status == VXGE_HW_OK) {
                struct vxge_hw_fifo_txd *txdp =
                        (struct vxge_hw_fifo_txd *)*txdlh;
                struct __vxge_hw_fifo_txdl_priv *priv;

                priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);

                /* reset the TxDL's private */
                priv->align_dma_offset = 0;
                priv->align_vaddr_start = priv->align_vaddr;
                priv->align_used_frags = 0;
                priv->frags = 0;
                priv->alloc_frags = fifo->config->max_frags;
                priv->next_txdl_priv = NULL;

                *txdl_priv = (void *)(size_t)txdp->host_control;

                for (i = 0; i < fifo->config->max_frags; i++) {
                        txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
                        txdp->control_0 = txdp->control_1 = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
1503 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1504                                   void *txdlh, u32 frag_idx,
1505                                   dma_addr_t dma_pointer, u32 size)
1506 {
1507         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1508         struct vxge_hw_fifo_txd *txdp, *txdp_last;
1509         struct __vxge_hw_channel *channel;
1510
1511         channel = &fifo->channel;
1512
1513         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1515
1516         if (frag_idx != 0)
1517                 txdp->control_0 = txdp->control_1 = 0;
1518         else {
1519                 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1520                         VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1521                 txdp->control_1 |= fifo->interrupt_type;
1522                 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1523                         fifo->tx_intr_num);
1524                 if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
					(txdl_priv->frags - 1);
1527                         txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1528                                 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1529                 }
1530         }
1531
1532         vxge_assert(frag_idx < txdl_priv->alloc_frags);
1533
1534         txdp->buffer_pointer = (u64)dma_pointer;
1535         txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1536         fifo->stats->total_buffers++;
1537         txdl_priv->frags++;
1538 }
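
/*
 * For a multi-fragment send the call above is repeated once per fragment
 * before the final post (sketch; "nfrags" and "frag[]" are placeholders
 * for the caller's scatter-gather state):
 *
 *	for (i = 0; i < nfrags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *				frag[i].dma_addr, frag[i].len);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */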
1539
1540 /**
1541  * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1542  * @fifo: Handle to the fifo object used for non offload send
1543  * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
1550  *
1551  */
1552 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1553 {
1554         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1555         struct vxge_hw_fifo_txd *txdp_last;
1556         struct vxge_hw_fifo_txd *txdp_first;
1557         struct __vxge_hw_channel *channel;
1558
1559         channel = &fifo->channel;
1560
1561         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1562         txdp_first = txdlh;
1563
	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1565         txdp_last->control_0 |=
1566               VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1567         txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1568
1569         vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1570
1571         __vxge_hw_non_offload_db_post(fifo,
1572                 (u64)txdl_priv->dma_addr,
1573                 txdl_priv->frags - 1,
1574                 fifo->no_snoop_bits);
1575
1576         fifo->stats->total_posts++;
1577         fifo->stats->common_stats.usage_cnt++;
1578         if (fifo->stats->common_stats.usage_max <
1579                 fifo->stats->common_stats.usage_cnt)
1580                 fifo->stats->common_stats.usage_max =
1581                         fifo->stats->common_stats.usage_cnt;
1582 }
1583
1584 /**
1585  * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1586  * @fifo: Handle to the fifo object used for non offload send
1587  * @txdlh: Descriptor handle. Returned by HW.
1588  * @t_code: Transfer code, as per Titan User Guide,
1589  *          Transmit Descriptor Format.
1590  *          Returned by HW.
1591  *
1592  * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify the
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
1597  * vxge_hw_channel_callback_f).
1598  *
1599  * Implementation-wise, the driver is free to call
1600  * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1601  * channel callback, or in a deferred fashion and separate (from HW)
1602  * context.
1603  *
1604  * Non-zero @t_code means failure to process the descriptor.
1605  * The failure could happen, for instance, when the link is
1606  * down, in which case Titan completes the descriptor because it
1607  * is not able to send the data out.
1608  *
1609  * For details please refer to Titan User Guide.
1610  *
1611  * Returns: VXGE_HW_OK - success.
1612  * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1613  * are currently available for processing.
1614  *
1615  */
1616 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1617         struct __vxge_hw_fifo *fifo, void **txdlh,
1618         enum vxge_hw_fifo_tcode *t_code)
1619 {
1620         struct __vxge_hw_channel *channel;
1621         struct vxge_hw_fifo_txd *txdp;
1622         enum vxge_hw_status status = VXGE_HW_OK;
1623
1624         channel = &fifo->channel;
1625
1626         vxge_hw_channel_dtr_try_complete(channel, txdlh);
1627
1628         txdp = *txdlh;
1629         if (txdp == NULL) {
1630                 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1631                 goto exit;
1632         }
1633
1634         /* check whether host owns it */
1635         if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1636
1637                 vxge_assert(txdp->host_control != 0);
1638
1639                 vxge_hw_channel_dtr_complete(channel);
1640
1641                 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1642
1643                 if (fifo->stats->common_stats.usage_cnt > 0)
1644                         fifo->stats->common_stats.usage_cnt--;
1645
1646                 status = VXGE_HW_OK;
1647                 goto exit;
1648         }
1649
1650         /* no more completions */
1651         *txdlh = NULL;
1652         status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1653 exit:
1654         return status;
1655 }
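
/*
 * A completion handler typically drains the fifo in a loop (sketch;
 * process_txd() is a placeholder for the caller's own bookkeeping):
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *						VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		process_txd(txdlh);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */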
1656
1657 /**
1658  * vxge_hw_fifo_handle_tcode - Handle transfer code.
1659  * @fifo: Handle to the fifo object used for non offload send
1660  * @txdlh: Descriptor handle.
1661  * @t_code: One of the enumerated (and documented in the Titan user guide)
1662  *          "transfer codes".
1663  *
1664  * Handle descriptor's transfer code. The latter comes with each completed
1665  * descriptor.
1666  *
1667  * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_INVALID_TCODE - when the transfer code is out of range.
1670  */
1671 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1672                                               void *txdlh,
1673                                               enum vxge_hw_fifo_tcode t_code)
1674 {
1675         struct __vxge_hw_channel *channel;
1676
1677         enum vxge_hw_status status = VXGE_HW_OK;
1678         channel = &fifo->channel;
1679
	/* (t_code & 0x7) can never be negative, so only check the upper bound */
	if ((t_code & 0x7) > 0x4) {
1681                 status = VXGE_HW_ERR_INVALID_TCODE;
1682                 goto exit;
1683         }
1684
1685         fifo->stats->txd_t_code_err_cnt[t_code]++;
1686 exit:
1687         return status;
1688 }
1689
1690 /**
1691  * vxge_hw_fifo_txdl_free - Free descriptor.
1692  * @fifo: Handle to the fifo object used for non offload send
1693  * @txdlh: Descriptor handle.
1694  *
1695  * Free the reserved descriptor. This operation is "symmetrical" to
1696  * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1697  * lifecycle.
1698  *
 * After freeing, the descriptor can again be:
1701  *
1702  * - reserved (vxge_hw_fifo_txdl_reserve);
1703  *
1704  * - posted (vxge_hw_fifo_txdl_post);
1705  *
1706  * - completed (vxge_hw_fifo_txdl_next_completed);
1707  *
1708  * - and recycled again (vxge_hw_fifo_txdl_free).
1709  *
1710  * For alternative state transitions and more details please refer to
1711  * the design doc.
1712  *
1713  */
1714 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1715 {
1716         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1717         u32 max_frags;
1718         struct __vxge_hw_channel *channel;
1719
1720         channel = &fifo->channel;
1721
1722         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1723                         (struct vxge_hw_fifo_txd *)txdlh);
1724
1725         max_frags = fifo->config->max_frags;
1726
1727         vxge_hw_channel_dtr_free(channel, txdlh);
1728 }
1729
1730 /**
1731  * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to the MAC address table.
1733  * @vp: Vpath handle.
1734  * @macaddr: MAC address to be added for this vpath into the list
1735  * @macaddr_mask: MAC address mask for macaddr
1736  * @duplicate_mode: Duplicate MAC address add mode. Please see
1737  *             enum vxge_hw_vpath_mac_addr_add_mode{}
1738  *
1739  * Adds the given mac address and mac address mask into the list for this
1740  * vpath.
1741  * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1742  * vxge_hw_vpath_mac_addr_get_next
1743  *
1744  */
1745 enum vxge_hw_status
1746 vxge_hw_vpath_mac_addr_add(
1747         struct __vxge_hw_vpath_handle *vp,
1748         u8 (macaddr)[ETH_ALEN],
1749         u8 (macaddr_mask)[ETH_ALEN],
1750         enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1751 {
1752         u32 i;
1753         u64 data1 = 0ULL;
1754         u64 data2 = 0ULL;
1755         enum vxge_hw_status status = VXGE_HW_OK;
1756
1757         if (vp == NULL) {
1758                 status = VXGE_HW_ERR_INVALID_HANDLE;
1759                 goto exit;
1760         }
1761
1762         for (i = 0; i < ETH_ALEN; i++) {
1763                 data1 <<= 8;
1764                 data1 |= (u8)macaddr[i];
1765
1766                 data2 <<= 8;
1767                 data2 |= (u8)macaddr_mask[i];
1768         }
1769
1770         switch (duplicate_mode) {
1771         case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1772                 i = 0;
1773                 break;
1774         case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1775                 i = 1;
1776                 break;
1777         case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1778                 i = 2;
1779                 break;
1780         default:
1781                 i = 0;
1782                 break;
1783         }
1784
1785         status = __vxge_hw_vpath_rts_table_set(vp,
1786                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1787                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1788                         0,
1789                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1790                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1791                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1792 exit:
1793         return status;
1794 }
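
/*
 * Example invocation (sketch; "vp" is an open vpath handle and the two
 * arrays hold the address and its mask in the form the adapter expects):
 *
 *	u8 addr[ETH_ALEN], addr_mask[ETH_ALEN];
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, addr, addr_mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */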
1795
1796 /**
1797  * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from the MAC address table.
1799  * @vp: Vpath handle.
1800  * @macaddr: First MAC address entry for this vpath in the list
1801  * @macaddr_mask: MAC address mask for macaddr
1802  *
1803  * Returns the first mac address and mac address mask in the list for this
1804  * vpath.
1805  * see also: vxge_hw_vpath_mac_addr_get_next
1806  *
1807  */
1808 enum vxge_hw_status
1809 vxge_hw_vpath_mac_addr_get(
1810         struct __vxge_hw_vpath_handle *vp,
1811         u8 (macaddr)[ETH_ALEN],
1812         u8 (macaddr_mask)[ETH_ALEN])
1813 {
1814         u32 i;
1815         u64 data1 = 0ULL;
1816         u64 data2 = 0ULL;
1817         enum vxge_hw_status status = VXGE_HW_OK;
1818
1819         if (vp == NULL) {
1820                 status = VXGE_HW_ERR_INVALID_HANDLE;
1821                 goto exit;
1822         }
1823
1824         status = __vxge_hw_vpath_rts_table_get(vp,
1825                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1826                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1827                         0, &data1, &data2);
1828
1829         if (status != VXGE_HW_OK)
1830                 goto exit;
1831
1832         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1833
1834         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1835
1836         for (i = ETH_ALEN; i > 0; i--) {
1837                 macaddr[i-1] = (u8)(data1 & 0xFF);
1838                 data1 >>= 8;
1839
1840                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1841                 data2 >>= 8;
1842         }
1843 exit:
1844         return status;
1845 }
1846
1847 /**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from the MAC address table.
1851  * @vp: Vpath handle.
1852  * @macaddr: Next MAC address entry for this vpath in the list
1853  * @macaddr_mask: MAC address mask for macaddr
1854  *
1855  * Returns the next mac address and mac address mask in the list for this
1856  * vpath.
1857  * see also: vxge_hw_vpath_mac_addr_get
1858  *
1859  */
1860 enum vxge_hw_status
1861 vxge_hw_vpath_mac_addr_get_next(
1862         struct __vxge_hw_vpath_handle *vp,
1863         u8 (macaddr)[ETH_ALEN],
1864         u8 (macaddr_mask)[ETH_ALEN])
1865 {
1866         u32 i;
1867         u64 data1 = 0ULL;
1868         u64 data2 = 0ULL;
1869         enum vxge_hw_status status = VXGE_HW_OK;
1870
1871         if (vp == NULL) {
1872                 status = VXGE_HW_ERR_INVALID_HANDLE;
1873                 goto exit;
1874         }
1875
1876         status = __vxge_hw_vpath_rts_table_get(vp,
1877                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1878                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1879                         0, &data1, &data2);
1880
1881         if (status != VXGE_HW_OK)
1882                 goto exit;
1883
1884         data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1885
1886         data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1887
1888         for (i = ETH_ALEN; i > 0; i--) {
1889                 macaddr[i-1] = (u8)(data1 & 0xFF);
1890                 data1 >>= 8;
1891
1892                 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1893                 data2 >>= 8;
1894         }
1895
1896 exit:
1897         return status;
1898 }
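
/*
 * Walking the whole DA table for a vpath combines the two getters above
 * (sketch; "vp" is an open vpath handle, use_entry() is a placeholder):
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		use_entry(mac, mask);
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */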
1899
1900 /**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
1909  * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1910  * vxge_hw_vpath_mac_addr_get_next
1911  *
1912  */
1913 enum vxge_hw_status
1914 vxge_hw_vpath_mac_addr_delete(
1915         struct __vxge_hw_vpath_handle *vp,
1916         u8 (macaddr)[ETH_ALEN],
1917         u8 (macaddr_mask)[ETH_ALEN])
1918 {
1919         u32 i;
1920         u64 data1 = 0ULL;
1921         u64 data2 = 0ULL;
1922         enum vxge_hw_status status = VXGE_HW_OK;
1923
1924         if (vp == NULL) {
1925                 status = VXGE_HW_ERR_INVALID_HANDLE;
1926                 goto exit;
1927         }
1928
1929         for (i = 0; i < ETH_ALEN; i++) {
1930                 data1 <<= 8;
1931                 data1 |= (u8)macaddr[i];
1932
1933                 data2 <<= 8;
1934                 data2 |= (u8)macaddr_mask[i];
1935         }
1936
1937         status = __vxge_hw_vpath_rts_table_set(vp,
1938                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1939                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1940                         0,
1941                         VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1942                         VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1943 exit:
1944         return status;
1945 }
1946
1947 /**
1948  * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to the vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
1954  * see also: vxge_hw_vpath_vid_delete
1955  *
1956  */
1957 enum vxge_hw_status
1958 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1959 {
1960         enum vxge_hw_status status = VXGE_HW_OK;
1961
1962         if (vp == NULL) {
1963                 status = VXGE_HW_ERR_INVALID_HANDLE;
1964                 goto exit;
1965         }
1966
1967         status = __vxge_hw_vpath_rts_table_set(vp,
1968                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1969                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1970                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1971 exit:
1972         return status;
1973 }
1974
1975 /**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from the vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add
1983  *
1984  */
1985 enum vxge_hw_status
1986 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1987 {
1988         enum vxge_hw_status status = VXGE_HW_OK;
1989
1990         if (vp == NULL) {
1991                 status = VXGE_HW_ERR_INVALID_HANDLE;
1992                 goto exit;
1993         }
1994
1995         status = __vxge_hw_vpath_rts_table_set(vp,
1996                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1997                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1998                         0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1999 exit:
2000         return status;
2001 }
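
/*
 * The two VLAN helpers mirror each other; a driver typically calls them
 * from its VLAN filter add/kill paths (sketch; "vp" and "vid" are the
 * caller's vpath handle and VLAN id):
 *
 *	vxge_hw_vpath_vid_add(vp, vid);
 *	vxge_hw_vpath_vid_delete(vp, vid);
 */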
2002
2003 /**
2004  * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
2005  * @vp: Vpath handle.
2006  *
2007  * Enable promiscuous mode of Titan-e operation.
2008  *
2009  * See also: vxge_hw_vpath_promisc_disable().
2010  */
2011 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
2012                         struct __vxge_hw_vpath_handle *vp)
2013 {
2014         u64 val64;
2015         struct __vxge_hw_virtualpath *vpath;
2016         enum vxge_hw_status status = VXGE_HW_OK;
2017
2018         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2019                 status = VXGE_HW_ERR_INVALID_HANDLE;
2020                 goto exit;
2021         }
2022
2023         vpath = vp->vpath;
2024
2025         /* Enable promiscuous mode for function 0 only */
2026         if (!(vpath->hldev->access_rights &
2027                 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
2028                 return VXGE_HW_OK;
2029
2030         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2031
2032         if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2033
2034                 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2035                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2036                          VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2037                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2038
2039                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2040         }
2041 exit:
2042         return status;
2043 }
2044
2045 /**
2046  * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
2047  * @vp: Vpath handle.
2048  *
2049  * Disable promiscuous mode of Titan-e operation.
2050  *
2051  * See also: vxge_hw_vpath_promisc_enable().
2052  */
2053 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2054                         struct __vxge_hw_vpath_handle *vp)
2055 {
2056         u64 val64;
2057         struct __vxge_hw_virtualpath *vpath;
2058         enum vxge_hw_status status = VXGE_HW_OK;
2059
2060         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2061                 status = VXGE_HW_ERR_INVALID_HANDLE;
2062                 goto exit;
2063         }
2064
2065         vpath = vp->vpath;
2066
2067         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2068
2069         if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2070
2071                 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2072                            VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2073                            VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2074
2075                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2076         }
2077 exit:
2078         return status;
2079 }
2080
2081 /*
2082  * vxge_hw_vpath_bcast_enable - Enable broadcast
2083  * @vp: Vpath handle.
2084  *
2085  * Enable receiving broadcasts.
2086  */
2087 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2088                         struct __vxge_hw_vpath_handle *vp)
2089 {
2090         u64 val64;
2091         struct __vxge_hw_virtualpath *vpath;
2092         enum vxge_hw_status status = VXGE_HW_OK;
2093
2094         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2095                 status = VXGE_HW_ERR_INVALID_HANDLE;
2096                 goto exit;
2097         }
2098
2099         vpath = vp->vpath;
2100
2101         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2102
2103         if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2104                 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2105                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2106         }
2107 exit:
2108         return status;
2109 }
2110
2111 /**
2112  * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
2113  * @vp: Vpath handle.
2114  *
2115  * Enable Titan-e multicast addresses.
2116  * Returns: VXGE_HW_OK on success.
2117  *
2118  */
2119 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2120                         struct __vxge_hw_vpath_handle *vp)
2121 {
2122         u64 val64;
2123         struct __vxge_hw_virtualpath *vpath;
2124         enum vxge_hw_status status = VXGE_HW_OK;
2125
2126         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2127                 status = VXGE_HW_ERR_INVALID_HANDLE;
2128                 goto exit;
2129         }
2130
2131         vpath = vp->vpath;
2132
2133         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2134
2135         if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2136                 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2137                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2138         }
2139 exit:
2140         return status;
2141 }
2142
2143 /**
2144  * vxge_hw_vpath_mcast_disable - Disable  multicast addresses.
2145  * @vp: Vpath handle.
2146  *
2147  * Disable Titan-e multicast addresses.
2148  * Returns: VXGE_HW_OK - success.
2149  * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
2150  *
2151  */
2152 enum vxge_hw_status
2153 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2154 {
2155         u64 val64;
2156         struct __vxge_hw_virtualpath *vpath;
2157         enum vxge_hw_status status = VXGE_HW_OK;
2158
2159         if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2160                 status = VXGE_HW_ERR_INVALID_HANDLE;
2161                 goto exit;
2162         }
2163
2164         vpath = vp->vpath;
2165
2166         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2167
2168         if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2169                 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2170                 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2171         }
2172 exit:
2173         return status;
2174 }
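
/*
 * The promiscuous/multicast helpers above are typically driven from a
 * set_rx_mode handler (sketch; "dev" is the net_device whose flags are
 * being applied to the vpath handle "vp"):
 *
 *	if (dev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 *
 *	if (dev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */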
2175
2176 /*
2177  * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: When set, do not clear the alarms
2180  *
2181  * Process vpath alarms.
2182  *
2183  */
2184 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2185                         struct __vxge_hw_vpath_handle *vp,
2186                         u32 skip_alarms)
2187 {
2188         enum vxge_hw_status status = VXGE_HW_OK;
2189
2190         if (vp == NULL) {
2191                 status = VXGE_HW_ERR_INVALID_HANDLE;
2192                 goto exit;
2193         }
2194
2195         status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2196 exit:
2197         return status;
2198 }
2199
2200 /**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                            alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
2211  */
2212 void
2213 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2214                        int alarm_msix_id)
2215 {
2216         u64 val64;
2217         struct __vxge_hw_virtualpath *vpath = vp->vpath;
2218         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2219         u32 vp_id = vp->vpath->vp_id;
2220
2221         val64 =  VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2222                   (vp_id * 4) + tim_msix_id[0]) |
2223                  VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2224                   (vp_id * 4) + tim_msix_id[1]);
2225
2226         writeq(val64, &vp_reg->interrupt_cfg0);
2227
2228         writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2229                         (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2230                         &vp_reg->interrupt_cfg2);
2231
2232         if (vpath->hldev->config.intr_mode ==
2233                                         VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2234                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2235                                 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2236                                 0, 32), &vp_reg->one_shot_vect0_en);
2237                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2238                                 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2239                                 0, 32), &vp_reg->one_shot_vect1_en);
2240                 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2241                                 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2242                                 0, 32), &vp_reg->one_shot_vect2_en);
2243         }
2244 }
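
/*
 * MSI-X bring-up for a vpath pairs this call with the mask helpers below
 * (sketch; the vector layout, "alarm_id" and "msix_id" are the caller's
 * choice):
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_id);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */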
2245
2246 /**
2247  * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2248  * @vp: Virtual Path handle.
2249  * @msix_id:  MSIX ID
2250  *
 * The function masks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_unmask()
2257  */
2258 void
2259 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2260 {
2261         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2262         __vxge_hw_pio_mem_write32_upper(
2263                 (u32) vxge_bVALn(vxge_mBIT(msix_id  >> 2), 0, 32),
2264                 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2265 }
2266
2267 /**
2268  * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2269  * @vp: Virtual Path handle.
 * @msix_id:  MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask()
2278  */
2279 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2280 {
2281         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2282
	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2284                 __vxge_hw_pio_mem_write32_upper(
2285                         (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2286                         &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2287         else
2288                 __vxge_hw_pio_mem_write32_upper(
2289                         (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2290                         &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2291 }
2292
2293 /**
2294  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2295  * @vp: Virtual Path handle.
 * @msix_id:  MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask()
2304  */
2305 void
2306 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2307 {
2308         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2309         __vxge_hw_pio_mem_write32_upper(
2310                         (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2311                         &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2312 }
2313
2314 /**
2315  * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2316  * @vp: Virtual Path handle.
2317  *
2318  * Mask Tx and Rx vpath interrupts.
2319  *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2321  */
2322 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2323 {
	u64	tim_int_mask0[4] = {[0 ... 3] = 0};
	u32	tim_int_mask1[4] = {[0 ... 3] = 0};
2326         u64     val64;
2327         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2328
2329         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2330                 tim_int_mask1, vp->vpath->vp_id);
2331
2332         val64 = readq(&hldev->common_reg->tim_int_mask0);
2333
2334         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2335                 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2336                 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2337                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2338                         &hldev->common_reg->tim_int_mask0);
2339         }
2340
2341         val64 = readl(&hldev->common_reg->tim_int_mask1);
2342
2343         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2344                 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2345                 __vxge_hw_pio_mem_write32_upper(
2346                         (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2347                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2348                         &hldev->common_reg->tim_int_mask1);
2349         }
2350 }
2351
2352 /**
2353  * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2354  * @vp: Virtual Path handle.
2355  *
2356  * Unmask Tx and Rx vpath interrupts.
2357  *
2358  * See also: vxge_hw_vpath_inta_mask_tx_rx()
2359  */
2360 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2361 {
	u64	tim_int_mask0[4] = {[0 ... 3] = 0};
	u32	tim_int_mask1[4] = {[0 ... 3] = 0};
2364         u64     val64;
2365         struct __vxge_hw_device *hldev = vp->vpath->hldev;
2366
2367         VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2368                 tim_int_mask1, vp->vpath->vp_id);
2369
2370         val64 = readq(&hldev->common_reg->tim_int_mask0);
2371
2372         if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2373            (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2374                 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2375                         tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2376                         &hldev->common_reg->tim_int_mask0);
2377         }
2378
2379         if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2380            (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2381                 __vxge_hw_pio_mem_write32_upper(
2382                         (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2383                           tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2384                         &hldev->common_reg->tim_int_mask1);
2385         }
2386 }
2387
2388 /**
2389  * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2390  * descriptors and process the same.
2391  * @ring: Handle to the ring object used for receive
2392  *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
2401  */
2402 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2403 {
2404         u8 t_code;
2405         enum vxge_hw_status status = VXGE_HW_OK;
2406         void *first_rxdh;
2407         u64 val64 = 0;
2408         int new_count = 0;
2409
2410         ring->cmpl_cnt = 0;
2411
2412         status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2413         if (status == VXGE_HW_OK)
2414                 ring->callback(ring, first_rxdh,
2415                         t_code, ring->channel.userdata);
2416
2417         if (ring->cmpl_cnt != 0) {
2418                 ring->doorbell_cnt += ring->cmpl_cnt;
2419                 if (ring->doorbell_cnt >= ring->rxds_limit) {
2420                         /*
2421                          * Each RxD is of 4 qwords, update the number of
2422                          * qwords replenished
2423                          */
2424                         new_count = (ring->doorbell_cnt * 4);
2425
2426                         /* For each block add 4 more qwords */
2427                         ring->total_db_cnt += ring->doorbell_cnt;
2428                         if (ring->total_db_cnt >= ring->rxds_per_block) {
2429                                 new_count += 4;
2430                                 /* Reset total count */
2431                                 ring->total_db_cnt %= ring->rxds_per_block;
2432                         }
2433                         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2434                                 &ring->vp_reg->prc_rxd_doorbell);
2435                         val64 =
2436                           readl(&ring->common_reg->titan_general_int_status);
2437                         ring->doorbell_cnt = 0;
2438                 }
2439         }
2440
2441         return status;
2442 }
2443
2444 /**
2445  * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2446  * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Buffer in which completed skbs are returned to the caller
 * @nr_skb: Maximum number of skbs that can be returned via @skb_ptr
 * @more: Set when further completions remain to be processed
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via the supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling completed successfully.
2453  * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2454  * descriptors available which are yet to be processed.
2455  */
2456 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2457                                         struct sk_buff ***skb_ptr, int nr_skb,
2458                                         int *more)
2459 {
2460         enum vxge_hw_fifo_tcode t_code;
2461         void *first_txdlh;
2462         enum vxge_hw_status status = VXGE_HW_OK;
2463         struct __vxge_hw_channel *channel;
2464
2465         channel = &fifo->channel;
2466
2467         status = vxge_hw_fifo_txdl_next_completed(fifo,
2468                                 &first_txdlh, &t_code);
2469         if (status == VXGE_HW_OK)
2470                 if (fifo->callback(fifo, first_txdlh, t_code,
2471                         channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2472                         status = VXGE_HW_COMPLETIONS_REMAIN;
2473
2474         return status;
2475 }
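
/*
 * The two poll helpers pair naturally inside a NAPI poll routine (sketch;
 * "ring" and "fifo" belong to the same vpath, NR_SKB and the skb handling
 * are placeholders):
 *
 *	struct sk_buff *skbs[NR_SKB], **skb_ptr = skbs;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_rx(ring);
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, NR_SKB, &more);
 */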