#include "defaults.h"
#include "prox_globals.h"
#include "stats_task.h"
+#include "packet_utils.h"
-// runtime_flags 8 bits only
+// runtime_flags 16 bits only
#define TASK_MPLS_TAGGING 0x0001
#define TASK_ROUTING 0x0002
#define TASK_CLASSIFY 0x0004
#define TASK_MARK 0x0020
#define TASK_FP_HANDLE_ARP 0x0040
#define TASK_TX_CRC 0x0080
+#define TASK_L3 0x0100
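/*
 * A minimal sketch of how these runtime_flags bits are meant to be consumed:
 * plain bitwise tests. `targ` and the handle_* callees are illustrative
 * names, not definitions from this file.
 *
 *   if (targ->runtime_flags & TASK_ROUTING)
 *           handle_routing(tbase, mbufs, n_pkts);
 *   if (targ->runtime_flags & TASK_L3)
 *           handle_l3(tbase, mbufs, n_pkts);
 */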
// flag_features 64 bits
#define TASK_FEATURE_ROUTING 0x0001
#define TASK_FEATURE_NEVER_DISCARDS 0x0008
#define TASK_FEATURE_NO_RX 0x0010
#define TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS 0x0020
-#define TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS 0x0040
+#define TASK_FEATURE_TXQ_FLAGS_MULTSEGS 0x0040
#define TASK_FEATURE_ZERO_RX 0x0080
#define TASK_FEATURE_TXQ_FLAGS_REFCOUNT 0x0100
#define TASK_FEATURE_TSC_RX 0x0200
#define TASK_FEATURE_LUT_QINQ_HASH 0x4000
#define TASK_FEATURE_RX_ALL 0x8000
#define TASK_MULTIPLE_MAC 0x10000
+#define TASK_FEATURE_TXQ_FLAGS_MULTIPLE_MEMPOOL 0x20000
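/*
 * A minimal sketch of combining and querying feature bits, assuming a 64-bit
 * flag_features field on the task (the field itself is not shown in this
 * excerpt):
 *
 *   tbase->flag_features |= TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_ZERO_RX;
 *   if (tbase->flag_features & TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS)
 *           /* configure the TX queue without offloads */;
 */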
#define FLAG_TX_FLUSH 0x01
#define FLAG_NEVER_FLUSH 0x02
uint32_t n_trace;
uint32_t cur_trace;
void *pkt_mbuf_addr[MAX_RING_BURST]; /* To track reordering */
- uint8_t pkt_cpy[MAX_RING_BURST][128];
+ uint8_t pkt_cpy[MAX_RING_BURST][DUMP_PKT_LEN];
uint16_t pkt_cpy_len[MAX_RING_BURST];
};
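/*
 * A minimal sketch of filling the trace buffers above for one burst; `dump`
 * points at a struct task_rt_dump, and the loop bound on cur_trace is an
 * assumption for illustration:
 *
 *   for (uint16_t i = 0; i < n_pkts && dump->cur_trace < MAX_RING_BURST; ++i) {
 *           uint32_t len = rte_pktmbuf_pkt_len(mbufs[i]);
 *           if (len > DUMP_PKT_LEN)
 *                   len = DUMP_PKT_LEN;
 *           dump->pkt_mbuf_addr[dump->cur_trace] = mbufs[i];
 *           rte_memcpy(dump->pkt_cpy[dump->cur_trace],
 *                      rte_pktmbuf_mtod(mbufs[i], void *), len);
 *           dump->pkt_cpy_len[dump->cur_trace] = (uint16_t)len;
 *           dump->cur_trace++;
 *   }
 */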
#define MAX_RX_PKT_ALL 16384
+#define RX_BUCKET_SIZE (2 * MAX_RING_BURST + 1) /* Limit RX bucket size */
+#define TX_BUCKET_SIZE (MAX_RING_BURST + 1) /* Limit TX bucket size */
+
#define MAX_STACKED_RX_FUNCTIONS 16
typedef uint16_t (*rx_pkt_func) (struct task_base *tbase, struct rte_mbuf ***mbufs);
struct task_base_aux {
/* Only used when PROX_STATS is defined */
struct task_rt_stats stats;
- struct task_rt_dump task_rt_dump;
/* Used if TASK_FEATURE_TSC_RX is enabled */
struct {
struct rte_mbuf **all_mbufs;
- int rx_prev_count;
- int rx_prev_idx;
+ uint16_t rx_prev_count;
+ uint16_t rx_prev_idx;
uint16_t (*rx_pkt_prev[MAX_STACKED_RX_FUNCTIONS])(struct task_base *tbase, struct rte_mbuf ***mbufs);
- uint32_t rx_bucket[MAX_RING_BURST + 1];
- uint32_t tx_bucket[MAX_RING_BURST + 1];
+ uint32_t rx_bucket[RX_BUCKET_SIZE];
+ uint32_t tx_bucket[TX_BUCKET_SIZE];
+ int (*tx_pkt_l2)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
int (*tx_pkt_orig)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
int (*tx_pkt_hw)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts, uint8_t *out);
uint16_t (*tx_pkt_try)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
void (*start)(struct task_base *tbase);
void (*stop_last)(struct task_base *tbase);
void (*start_first)(struct task_base *tbase);
+ struct task_rt_dump task_rt_dump;
};
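/*
 * A minimal sketch of how rx_bucket/tx_bucket serve as burst-size histograms;
 * the wrapper below is illustrative, not from this file. The RX histogram is
 * sized RX_BUCKET_SIZE because a stacked receive path can apparently return
 * up to 2 * MAX_RING_BURST packets at once.
 *
 *   static uint16_t rx_pkt_bucket(struct task_base *tbase, struct rte_mbuf ***mbufs)
 *   {
 *           uint16_t n_rx = tbase->aux->rx_pkt_prev[0](tbase, mbufs);
 *           tbase->aux->rx_bucket[n_rx]++;
 *           return n_rx;
 *   }
 */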
/* The task_base is accessed for _all_ task types. In case
- no debugging is needed, it has been optimized to fit
+ no debugging or L3 support is needed, it has been optimized to fit
into a single cache line to minimize cache pollution */
struct task_base {
int (*handle_bulk)(struct task_base *tbase, struct rte_mbuf **mbufs, const uint16_t n_pkts);
struct tx_params_sw tx_params_sw;
struct tx_params_hw_sw tx_params_hw_sw;
};
+ struct l3_base l3;
+ uint32_t local_ipv4;
} __attribute__((packed)) __rte_cache_aligned;
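/*
 * If the single-cache-line claim above should be enforced, a compile-time
 * guard along these lines could be added; note the comment says the claim
 * only holds without the debugging/L3 members, so treat this as a sketch
 * (RTE_CACHE_LINE_SIZE comes from DPDK):
 *
 *   _Static_assert(sizeof(struct task_base) <= RTE_CACHE_LINE_SIZE,
 *                  "struct task_base spills past one cache line");
 */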
static void task_base_add_rx_pkt_function(struct task_base *tbase, rx_pkt_func to_add)
return;
}
- for (int16_t i = tbase->aux->rx_prev_count; i >= 0; --i) {
- tbase->aux->rx_pkt_prev[i + 1] = tbase->aux->rx_pkt_prev[i];
+ for (int16_t i = tbase->aux->rx_prev_count; i > 0; --i) {
+ tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i - 1];
}
tbase->aux->rx_pkt_prev[0] = tbase->rx_pkt;
tbase->rx_pkt = to_add;
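/*
 * Usage sketch: the function above pushes the current rx_pkt onto
 * rx_pkt_prev[] and installs to_add in front, so receive handlers stack
 * LIFO. rx_pkt_trace is an illustrative name, and the deletion routine's
 * name is assumed from the logic shown below:
 *
 *   task_base_add_rx_pkt_function(tbase, rx_pkt_trace);
 *   // ... rx_pkt_trace now runs first and can chain to rx_pkt_prev[0] ...
 *   task_base_del_rx_pkt_function(tbase, rx_pkt_trace);
 */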
int cur = 0;
int found = 0;
- if (tbase->aux->rx_prev_count == 1) {
+ if (unlikely(tbase->aux->rx_prev_count == 0)) {
+ return;
+ } else if (tbase->rx_pkt == to_del) {
tbase->rx_pkt = tbase->aux->rx_pkt_prev[0];
+ for (int16_t i = 0; i < tbase->aux->rx_prev_count - 1; ++i) {
+ tbase->aux->rx_pkt_prev[i] = tbase->aux->rx_pkt_prev[i + 1];
+ }
found = 1;
} else {
for (int16_t i = 0; i < tbase->aux->rx_prev_count; ++i) {