/**
 * Filter IPv6 NDP (ICMPv6) packets out of a received burst.
 *
 * Scans the nb_rx mbufs in *mbufs_ptr; any IPv6 packet whose next header
 * is ICMPv6 is dumped and forwarded to the master task over the
 * control-plane ring, and counted as non-dataplane ("skip").  RX / idle
 * task statistics are updated here.
 *
 * @param tbase      task whose rings and statistics are used
 * @param nb_rx      number of packets in the burst (may be 0)
 * @param mbufs_ptr  in/out pointer to the mbuf array; handle_l3() may
 *                   rewrite it, which is why the triple pointer is passed on
 * @return number of packets left for dataplane processing (nb_rx - skip);
 *         0 for an empty burst.
 */
static inline int handle_ndp(struct task_base *tbase, uint16_t nb_rx, struct rte_mbuf ***mbufs_ptr)
{
	struct rte_mbuf **mbufs = *mbufs_ptr;
	prox_rte_ipv6_hdr *ipv6_hdr;
	int i;
	prox_rte_ether_hdr *hdr[MAX_PKT_BURST];
	int skip = 0;
	uint16_t vlan = 0;

	for (i = 0; i < nb_rx; i++) {
		PREFETCH0(mbufs[i]);
		/* Fix: the original prefetched and later dereferenced hdr[i]
		 * without ever assigning it (uninitialized pointer read).
		 * Resolve the ethernet header from the mbuf first. */
		hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
		PREFETCH0(hdr[i]);
	}
	for (i = 0; i < nb_rx; i++) {
		/* prox_get_ipv6_hdr() walks past any VLAN tag(s) and returns
		 * NULL for non-IPv6 frames, replacing the old raw
		 * ether_type == ETYPE_IPv6 test. */
		ipv6_hdr = prox_get_ipv6_hdr(hdr[i], rte_pktmbuf_pkt_len(mbufs[i]), &vlan);
		if (unlikely((ipv6_hdr) && (ipv6_hdr->proto == ICMPv6))) {
			/* NDP is control-plane traffic: hand it to the master task. */
			dump_l3(tbase, mbufs[i]);
			tx_ring(tbase, tbase->l3.ctrl_plane_ring, NDP_PKT_FROM_NET_TO_MASTER, mbufs[i]);
			skip++;
		}
	}
	if (unlikely(nb_rx == 0)) {
		/* NOTE(review): cur_tsc is not declared in this function or its
		 * parameter list — assumed visible from enclosing scope (e.g. a
		 * file-scope or caller-provided timestamp); confirm. */
		TASK_STATS_ADD_IDLE(&tbase->aux->stats, rte_rdtsc() - cur_tsc);
		return 0;
	}
	/* NOTE(review): l3_ndp is likewise not declared in this scope —
	 * presumably a file-scope mode flag; verify against the full file. */
	if (l3_ndp == PROX_L3)
		skip = handle_l3(tbase, nb_rx, mbufs_ptr);
	if (skip)
		TASK_STATS_ADD_RX_NON_DP(&tbase->aux->stats, skip);

	TASK_STATS_ADD_RX(&tbase->aux->stats, nb_rx);
	return nb_rx - skip;
}
uint16_t rx_pkt_hw(struct task_base *tbase, struct rte_mbuf ***mbufs)