}
else {
- struct ether_hdr *eth = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr*)(eth + 1);
- struct tcp_hdr *tcp = (struct tcp_hdr*)(ip + 1);
+ prox_rte_ether_hdr *eth = rte_pktmbuf_mtod(mbuf, prox_rte_ether_hdr *);
+ prox_rte_ipv4_hdr *ip = (prox_rte_ipv4_hdr*)(eth + 1);
+ prox_rte_tcp_hdr *tcp = (prox_rte_tcp_hdr*)(ip + 1);
task->out_saved = 0;
task->cancelled = 1;
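Note: DPDK 19.08 renamed the protocol header types and Ethernet length macros (struct ether_hdr became struct rte_ether_hdr, ETHER_MAX_LEN became RTE_ETHER_MAX_LEN, and so on), which is what these prox_rte_* wrappers paper over. A minimal sketch of the kind of compatibility shim this diff assumes, keyed on RTE_VERSION; only the prox_rte_* names are taken from the diff, everything else is illustrative:

#include <rte_version.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#if RTE_VERSION < RTE_VERSION_NUM(19, 8, 0, 0)
/* Pre-19.08 DPDK: header structs and macros carry no rte_/RTE_ prefix. */
typedef struct ether_hdr prox_rte_ether_hdr;
typedef struct ipv4_hdr prox_rte_ipv4_hdr;
typedef struct tcp_hdr prox_rte_tcp_hdr;
#define PROX_RTE_ETHER_MAX_LEN ETHER_MAX_LEN
#define PROX_RTE_ETHER_MIN_LEN ETHER_MIN_LEN
#else
/* DPDK 19.08 and later: the renamed, prefixed identifiers. */
typedef struct rte_ether_hdr prox_rte_ether_hdr;
typedef struct rte_ipv4_hdr prox_rte_ipv4_hdr;
typedef struct rte_tcp_hdr prox_rte_tcp_hdr;
#define PROX_RTE_ETHER_MAX_LEN RTE_ETHER_MAX_LEN
#define PROX_RTE_ETHER_MIN_LEN RTE_ETHER_MIN_LEN
#endif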
const uint64_t hz = rte_get_tsc_hz();
- ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, ETHER_MAX_LEN + 20);
- ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, ETHER_MAX_LEN + 20);
+ ret->tt_cfg[PEER_CLIENT] = token_time_cfg_create(up, hz, PROX_RTE_ETHER_MAX_LEN + 20);
+ ret->tt_cfg[PEER_SERVER] = token_time_cfg_create(dn, hz, PROX_RTE_ETHER_MAX_LEN + 20);
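The + 20 added to each frame length is the per-packet wire overhead that never appears in the mbuf: 7 bytes of preamble, 1 byte start-of-frame delimiter and the 12-byte minimum inter-frame gap. Charging the token bucket for it makes the shaped rate match the physical line rate; since PROX_RTE_ETHER_MAX_LEN is 1518 (CRC included), a maximum-size frame costs 1538 token bytes. As a sketch (wire_bytes is a hypothetical helper for illustration, not a PROX function):

/* Wire-time footprint of one Ethernet frame, in bytes: the L2 frame
 * itself (CRC included) plus 7B preamble + 1B SFD + 12B IFG = 20B. */
static inline uint64_t wire_bytes(uint64_t frame_len)
{
	return frame_len + 20;
}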
if (!strcmp(proto, "tcp")) {
ret->proto = IPPROTO_TCP;
struct token_time_cfg tt_cfg = {
.bpp = targ->rate_bps,
.period = rte_get_tsc_hz(),
- .bytes_max = n_descriptors * (ETHER_MIN_LEN + 20),
+ .bytes_max = n_descriptors * (PROX_RTE_ETHER_MIN_LEN + 20),
};
token_time_init(&task->token_time, &tt_cfg);
task->heap = heap_create(targ->n_concur_conn, socket);
task->seed = rte_rdtsc();
- /* task->token_time.bytes_max = MAX_PKT_BURST * (ETHER_MAX_LEN + 20); */
+ /* task->token_time.bytes_max = MAX_PKT_BURST * (PROX_RTE_ETHER_MAX_LEN + 20); */
/* To avoid overflowing the tx descriptors, the token bucket
   size needs to be limited. The descriptors are filled most
   quickly by the smallest packets, so the bucket is capped at
   the number of tx descriptors times the smallest frame's
   wire footprint. */
struct token_time_cfg tt_cfg = {
.bpp = targ->rate_bps,
.period = rte_get_tsc_hz(),
- .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (ETHER_MIN_LEN + 20),
+ .bytes_max = prox_port_cfg[targ->tx_port_queue[0].port].n_txd * (PROX_RTE_ETHER_MIN_LEN + 20),
};
token_time_init(&task->token_time, &tt_cfg);
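For context, struct token_time_cfg describes a byte-granular token bucket: judging by how it is filled in above, bpp bytes of credit accrue every period TSC cycles (one second here, since period is rte_get_tsc_hz()) and bytes_max caps the accumulated burst. The consuming side is outside these hunks; below is a hedged sketch of how such a bucket is typically driven, with every name beyond struct token_time_cfg being an assumption rather than PROX's token_time API:

/* Illustrative refill/consume pair for a byte token bucket; the state
 * struct and both helpers are assumptions, not taken from PROX. */
struct tb_state {
	uint64_t tsc_last;            /* TSC stamp of the last refill */
	uint64_t bytes_now;           /* credit currently available */
	struct token_time_cfg cfg;
};

static void tb_refill(struct tb_state *tb, uint64_t tsc_now)
{
	uint64_t delta = tsc_now - tb->tsc_last;

	/* cfg.bpp bytes accrue per cfg.period cycles; 64-bit overflow
	 * on the multiply is ignored here for clarity. */
	tb->bytes_now += delta * tb->cfg.bpp / tb->cfg.period;
	if (tb->bytes_now > tb->cfg.bytes_max)
		tb->bytes_now = tb->cfg.bytes_max;  /* cap the burst */
	tb->tsc_last = tsc_now;
}

static int tb_take(struct tb_state *tb, uint64_t cost)
{
	if (tb->bytes_now < cost)
		return 0;  /* not enough credit yet: hold the packet */
	tb->bytes_now -= cost;
	return 1;
}

Capping bytes_max at the tx ring size times the minimum wire-size frame, as both tt_cfg initializers above do, guarantees that even a full bucket can never release more packets than the tx ring has descriptors for.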