#include "lconf.h"
#include "prox_cfg.h"
#include "prox_shared.h"
+#include "prox_compat.h"
/* NOTE(review): this span is diff residue, not valid C. The struct
 * declaration below is truncated, and the lines that follow (with
 * leading '+'/'-' hunk markers) belong to a separate patch hunk that
 * appears to switch the checksum-offload flag source from the old
 * port capability field to the DPDK >= 20.11 requested_tx_offload
 * bitmask — TODO: confirm against the full patch. */
struct task_qinq_decap4 {
	struct task_base base;
struct prox_port_cfg *port = find_reachable_port(targ);
if (port) {
/* '-' line: old code reading the legacy capability flag;
 * '+' line: replacement masking the requested TX offload bits. */
-	task->offload_crc = port->capabilities.tx_offload_cksum;
+	task->offload_crc = port->requested_tx_offload & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | RTE_ETH_TX_OFFLOAD_UDP_CKSUM);
}
// By default, calling this function 1K times per second => 64K ARP per second max
/* Mask out the PCP/DEI bits of the (big-endian) VLAN tags, then
 * byte-swap to host order. NOTE(review): masking before the swap with
 * 0xFF0F looks intentional for big-endian tag layout — verify. */
svlan = rte_be_to_cpu_16(svlan & 0xFF0F);
cvlan = rte_be_to_cpu_16(cvlan & 0xFF0F);
/* NOTE(review): diff residue — the '+' lines add a DPDK >= 20.11
 * branch that reads mbuf->dynfield1[0] instead of the removed
 * mbuf->udata64 field. The #if/#endif pairs below are unbalanced in
 * this view because hunk context lines were dropped. */
+#if RTE_VERSION >= RTE_VERSION_NUM(20,11,0,0)
+	plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%x, L2Tag=%d type=%d\n",
+		key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->dynfield1[0], mbuf->vlan_tci_outer, mbuf->packet_type);
+#else
#if RTE_VERSION >= RTE_VERSION_NUM(2,1,0,0)
/* DPDK >= 2.1 variant: udata64 carried per-mbuf metadata here. */
plogx_err("Can't convert key %016lx qinq %d|%d (%x|%x) to gre_id, rss=%x flags=%lx, status_err_len=%lx, L2Tag=%d type=%d\n",
	key, svlan, cvlan, svlan, cvlan, mbuf->hash.rss, mbuf->ol_flags, mbuf->udata64, mbuf->vlan_tci_outer, mbuf->packet_type);
/* NOTE(review): orphaned argument line — its plogx_err call (the
 * pre-2.1 variant using mbuf->reserved) was lost from this view. */
	key, svlan, cvlan, svlan, cvlan, mbuf->ol_flags, mbuf->reserved);
#endif
#endif
+#endif
/* This #else belongs to a USE_QINQ-style conditional whose opening
 * #if is outside this view — TODO confirm against the full file. */
#else
plogx_err("Can't convert ip %x to gre_id\n", rte_bswap32(packet->ipv4_hdr.src_addr));
#endif
/* NOTE(review): fragment of a hash-insert routine; the enclosing
 * function signature is outside this view. The '-'/'+' lines show the
 * patch replacing a direct rte_table_hash_key8_ext_dosig_ops.f_add
 * call with the prox_compat wrapper prox_rte_table_key8_add. */
void* entry_in_hash;
int ret, key_found = 0;
-	ret = rte_table_hash_key8_ext_dosig_ops.
-		f_add(hash, key, data, &key_found, &entry_in_hash);
+	ret = prox_rte_table_key8_add(hash, key, data, &key_found, &entry_in_hash);
/* Non-zero return from the table add is treated as failure. */
if (unlikely(ret)) {
	plogx_err("Failed to add key: ip %x, gre %x\n", key->ip, key->gre_id);
	return 1;
/* NOTE(review): fragment — bulk-inserts n_msgs CPE entries, stamping
 * each with an expiry TSC (now + cpe_timeout). The '-'/'+' lines show
 * the same table-ops -> prox_compat wrapper substitution as above. */
for (uint16_t i = 0; i < n_msgs; ++i) {
	msgs[i]->data.tsc = rte_rdtsc() + cpe_timeout;
-	ret = rte_table_hash_key8_ext_dosig_ops.
-		f_add(cpe_table, &msgs[i]->key, &msgs[i]->data, &key_found, &entry_in_hash);
+	ret = prox_rte_table_key8_add(cpe_table, &msgs[i]->key, &msgs[i]->data, &key_found, &entry_in_hash);
	/* Failures are logged but not propagated in this loop. */
	if (unlikely(ret)) {
		plogx_err("Failed to add key %x, gre %x\n", msgs[i]->key.ip, msgs[i]->key.gre_id);
	}
/* NOTE(review): fragment — builds an all-ones mask for n_pkts packets
 * (RTE_LEN2MASK) and bulk-looks-up the qinq->gre table via the
 * prox_compat wrapper ('+' line replaces the direct f_lookup call). */
uint64_t pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
uint64_t lookup_hit_mask = 0;
/* Fixed-size result array: assumes n_pkts <= 64 — TODO confirm,
 * this matches the 64-bit pkts_mask width. */
struct qinq_gre_data* entries[64];
-	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
+	prox_rte_table_key8_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
TASK_STATS_ADD_RX(&task->base.aux->stats, n_pkts);
for (uint16_t j = 0; j < n_pkts; ++j) {
/* NOTE(review): hunk boundary — the loop body above and the insert
 * below belong to different functions in the original file. */
void* entry_in_hash;
int ret, key_found = 0;
-	ret = rte_table_hash_key8_ext_dosig_ops.
-		f_add(task->cpe_table, &key, &data, &key_found, &entry_in_hash);
+	ret = prox_rte_table_key8_add(task->cpe_table, &key, &data, &key_found, &entry_in_hash);
if (unlikely(ret)) {
	plogx_err("Failed to add key %x, gre %x\n", key.ip, key.gre_id);
}
/* NOTE(review): fragment — extracts lookup keys from the mbuf burst,
 * then bulk-looks-up the qinq->gre table; the fast path requires
 * every packet to hit (lookup_hit_mask == pkts_mask). */
extract_key_bulk(mbufs, n_pkts, task);
-	rte_table_hash_key8_ext_dosig_ops.f_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
+	prox_rte_table_key8_lookup(task->qinq_gre_table, task->fake_packets, pkts_mask, &lookup_hit_mask, (void**)entries);
if (likely(lookup_hit_mask == pkts_mask)) {
	for (uint16_t j = 0; j < n_pkts; ++j) {
/* Encapsulate an upstream packet in IPv4/GRE.
 * NOTE(review): incomplete diff view — interior statements (padlen
 * computation, tunnel_ip_proto template declaration, the GRE header
 * setup, and the closing brace) are missing; hunks are fused. The
 * '-'/'+' pairs show the patch renaming removed DPDK types
 * (struct ipv4_hdr / ether_hdr / ETHER_MAX_LEN) to their prox_compat
 * equivalents (prox_rte_ipv4_hdr / prox_rte_ether_hdr /
 * PROX_RTE_ETHER_MAX_LEN). */
static inline void gre_encap(struct task_qinq_decap4 *task, uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id)
{
#ifdef USE_QINQ
	/* Inner IPv4 header sits right after the QinQ header. */
-	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
+	prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct qinq_hdr *));
#else
	/* Without QinQ, it sits right after the plain Ethernet header. */
-	struct ipv4_hdr *pip = (struct ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, struct ether_hdr *));
+	prox_rte_ipv4_hdr *pip = (prox_rte_ipv4_hdr *)(1 + rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *));
#endif
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
	/* Strip Ethernet padding beyond the IP payload before encap.
	 * NOTE(review): the '20' here presumably accounts for the GRE
	 * delta (new IP hdr minus reused bytes) — confirm in full file. */
	uint16_t padlen = rte_pktmbuf_pkt_len(mbuf) - 20 - ip_len - sizeof(struct qinq_hdr);
	rte_pktmbuf_trim(mbuf, padlen);
	}
	/* Refuse to emit a frame above max Ethernet size; fragmentation
	 * is not implemented. */
-	PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > ETHER_MAX_LEN,
+	PROX_PANIC(rte_pktmbuf_data_len(mbuf) - padlen + 20 > PROX_RTE_ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) - padlen + 20);
#ifdef USE_QINQ
	/* prepend only 20 bytes instead of 28, 8 bytes are present from the QinQ */
-	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
+	prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 20);
#else
-	struct ether_hdr *peth = (struct ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
+	prox_rte_ether_hdr *peth = (prox_rte_ether_hdr *)rte_pktmbuf_prepend(mbuf, 28);
#endif
	PROX_ASSERT(peth);
	if (task->runtime_flags & TASK_TX_CRC) {
		/* calculate IP CRC here to avoid problems with -O3 flag with gcc */
#ifdef MPLS_ROUTING
-		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+		prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#else
-		prox_ip_cksum(mbuf, pip, sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+		prox_ip_cksum(mbuf, pip, sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#endif
	}
	/* new IP header */
	/* Copy the outer tunnel IP template (tunnel_ip_proto is declared
	 * outside this view) and grow the length by IP+GRE overhead. */
-	struct ipv4_hdr *p_tunnel_ip = (struct ipv4_hdr *)(peth + 1);
-	rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
-	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+	prox_rte_ipv4_hdr *p_tunnel_ip = (prox_rte_ipv4_hdr *)(peth + 1);
+	rte_memcpy(p_tunnel_ip, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr));
+	ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
	p_tunnel_ip->total_length = rte_cpu_to_be_16(ip_len);
	p_tunnel_ip->src_addr = src_ipv4;
/* GRE-encapsulate a downstream packet and pick the routed next hop.
 * NOTE(review): incomplete diff view — the routing lookup, header
 * build-out, return statement, and closing brace are missing. Same
 * prox_compat type-rename pattern ('-'/'+' pairs) as gre_encap. */
static inline uint8_t gre_encap_route(uint32_t src_ipv4, struct rte_mbuf *mbuf, uint32_t gre_id, struct task_qinq_decap4 *task)
{
	/* DOWNSTREAM_DELTA is the encap overhead; no fragmentation support. */
-	PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > ETHER_MAX_LEN,
+	PROX_PANIC(rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA > PROX_RTE_ETHER_MAX_LEN,
		   "Would need to fragment packet new size = %u - not implemented\n",
		   rte_pktmbuf_data_len(mbuf) + DOWNSTREAM_DELTA);
	PROX_ASSERT(packet);
	PREFETCH0(packet);
	/* Inner IPv4 header inside the delta-shifted CPE packet layout. */
-	struct ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
+	prox_rte_ipv4_hdr *pip = &((struct cpe_pkt_delta *)packet)->pkt.ipv4_hdr;
	uint16_t ip_len = rte_be_to_cpu_16(pip->total_length);
	/* returns 0 on success, returns -ENOENT of failure (or -EINVAL if first or last parameter is NULL) */
#endif
	/* New IP header */
	/* Copy the outer tunnel template, extend length by IP+GRE, and
	 * fill src/dst from the route table entry (next_hop_index is
	 * computed in the missing hunk — TODO confirm). */
-	rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(struct ipv4_hdr));
-	ip_len += sizeof(struct ipv4_hdr) + sizeof(struct gre_hdr);
+	rte_memcpy(&packet->tunnel_ip_hdr, &tunnel_ip_proto, sizeof(prox_rte_ipv4_hdr));
+	ip_len += sizeof(prox_rte_ipv4_hdr) + sizeof(struct gre_hdr);
	packet->tunnel_ip_hdr.total_length = rte_cpu_to_be_16(ip_len);
	packet->tunnel_ip_hdr.src_addr = src_ipv4;
	packet->tunnel_ip_hdr.dst_addr = task->next_hops[next_hop_index].ip_dst;
	if (task->runtime_flags & TASK_TX_CRC) {
#ifdef MPLS_ROUTING
-		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr) + sizeof(struct mpls_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr) + sizeof(struct mpls_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#else
-		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(struct ether_hdr), sizeof(struct ipv4_hdr), task->offload_crc);
+		prox_ip_cksum(mbuf, (void *)&(packet->tunnel_ip_hdr), sizeof(prox_rte_ether_hdr), sizeof(prox_rte_ipv4_hdr), task->offload_crc);
#endif
	}