#include "random.h"
#include "handle_impair.h"
#include "prefetch.h"
+#include "prox_port_cfg.h"
#if RTE_VERSION < RTE_VERSION_NUM(1,8,0,0)
#define RTE_CACHE_LINE_SIZE CACHE_LINE_SIZE
unsigned queue_head;
unsigned queue_tail;
unsigned queue_mask;
- int tresh;
+ int tresh_no_drop;
+ int tresh_duplicate;
+ int tresh_delay;
unsigned int seed;
struct random state;
uint64_t last_idx;
struct queue *buffer;
uint32_t socket_id;
- int need_update;
+ uint32_t flags;
+ uint8_t src_mac[6];
};
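+/* Bits stored in task->flags: IMPAIR_NEED_UPDATE asks task_impair_update()
+ * to (re)apply the delay configuration, IMPAIR_SET_MAC makes the task
+ * rewrite the source MAC of every packet it forwards. */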
+#define IMPAIR_NEED_UPDATE 1
+#define IMPAIR_SET_MAC 2
+
static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
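+/* The per-feature probabilities are pre-scaled against RAND_MAX so the
+ * datapath needs only one integer compare per rand_r() draw. proba_* is
+ * assumed to be a percentage (0..100): (proba * 10000) / 1000000 reduces
+ * to proba / 100, with the intermediate scaling keeping precision for
+ * fractional percentages. */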
-void task_impair_set_proba(struct task_base *tbase, float proba)
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba_no_drop)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * (uint32_t)(proba_no_drop * 10000) / 1000000;
+}
+
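+/* A changed delay probability must go through task_impair_update(), so
+ * flag the task instead of touching the queues from here. */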
+void task_impair_set_proba_delay(struct task_base *tbase, float proba_delay)
{
struct task_impair *task = (struct task_impair *)tbase;
- task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * (uint32_t)(proba_delay * 10000) / 1000000;
+ task->flags |= IMPAIR_NEED_UPDATE;
+}
+
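+/* The duplication threshold is read directly by the fast path, so no
+ * IMPAIR_NEED_UPDATE round-trip is needed here. */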
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba_dup)
+{
+ struct task_impair *task = (struct task_impair *)tbase;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * (uint32_t)(proba_dup * 10000) / 1000000;
}
void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us)
{
struct task_impair *task = (struct task_impair *)tbase;
- task->need_update = 1;
+ task->flags |= IMPAIR_NEED_UPDATE;
task->random_delay_us = random_delay_us;
task->delay_us = delay_us;
}
struct task_impair *task = (struct task_impair *)tbase;
uint32_t queue_len = 0;
size_t mem_size;
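+ /* Rebuild the delay machinery only when a setter raised
+ * IMPAIR_NEED_UPDATE, and clear the flag so this runs once per change. */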
- if (!task->need_update)
+ if ((task->flags & IMPAIR_NEED_UPDATE) == 0)
return;
- task->need_update = 0;
+ task->flags &= ~IMPAIR_NEED_UPDATE;
uint64_t now = rte_rdtsc();
uint8_t out[MAX_PKT_BURST] = {0};
uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK;
uint16_t idx = 0;
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx++] = task->queue[task->queue_tail].mbuf;
task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
}
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf;
queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
}
}
} else if (task->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, task->socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id);
{
struct task_impair *task = (struct task_impair *)tbase;
uint8_t out[MAX_PKT_BURST];
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
+ int ret = 0;
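+ /* Two-pass prefetch: warm the mbuf structs first, then the packet
+ * headers, so the header accesses below hit cache. */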
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ PREFETCH0(mbufs[i]);
+ }
for (uint16_t i = 0; i < n_pkts; ++i) {
- out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[i]);
}
- return task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
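+ /* Test IMPAIR_SET_MAC once, outside the per-packet loop: rewrite the
+ * source MAC only when configured, and draw the no-drop dice for each
+ * packet in both variants. */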
+ if (task->flags & IMPAIR_SET_MAC) {
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
+ }
+ } else {
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
+ }
+ }
+ ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
task_impair_update(tbase);
+ return ret;
}
static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts)
uint16_t enqueue_failed;
uint16_t i;
int ret = 0;
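+ /* Prefetch the mbufs and their Ethernet headers up front, so the
+ * optional MAC rewrite below does not stall on memory. */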
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ PREFETCH0(mbufs[i]);
+ }
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[i]);
+ }
int nb_empty_slots = (task->queue_tail - task->queue_head + task->queue_mask) & task->queue_mask;
if (likely(nb_empty_slots >= n_pkts)) {
/* We know n_pkts fits, no need to check for every packet */
for (i = 0; i < n_pkts; ++i) {
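+ /* Rewrite the source MAC, if configured, before the packet waits
+ * in the delay queue. */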
+ if (task->flags & IMPAIR_SET_MAC)
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
task->queue[task->queue_head].tsc = now + task->delay_time;
task->queue[task->queue_head].mbuf = mbufs[i];
task->queue_head = (task->queue_head + 1) & task->queue_mask;
} else {
for (i = 0; i < n_pkts; ++i) {
if (((task->queue_head + 1) & task->queue_mask) != task->queue_tail) {
+ if (task->flags & IMPAIR_SET_MAC)
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
task->queue[task->queue_head].tsc = now + task->delay_time;
task->queue[task->queue_head].mbuf = mbufs[i];
task->queue_head = (task->queue_head + 1) & task->queue_mask;
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
uint16_t idx = 0;
- if (task->tresh != RAND_MAX) {
+ if (task->tresh_no_drop != RAND_MAX) {
while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
if (task->queue[task->queue_tail].tsc <= now) {
- out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
PREFETCH0(new_mbufs[idx]);
PREFETCH0(&new_mbufs[idx]->cacheline1);
int ret = 0;
uint64_t packet_time, idx;
uint64_t now_idx = (now >> DELAY_ACCURACY) & DELAY_MAX_MASK;
+ prox_rte_ether_hdr * hdr[MAX_PKT_BURST];
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ PREFETCH0(mbufs[i]);
+ }
+ for (uint16_t i = 0; i < n_pkts; ++i) {
+ hdr[i] = rte_pktmbuf_mtod(mbufs[i], prox_rte_ether_hdr *);
+ PREFETCH0(hdr[i]);
+ }
for (i = 0; i < n_pkts; ++i) {
- packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
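+ /* Delay a packet with probability tresh_delay / RAND_MAX; otherwise
+ * schedule it in the current time slot, i.e. undelayed. */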
+ if (rand_r(&task->seed) <= task->tresh_delay)
+ packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+ else
+ packet_time = now;
idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK;
while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
struct queue *queue = &task->buffer[idx];
if (((queue->queue_head + 1) & task->queue_mask) != queue->queue_tail) {
+ if (task->flags & IMPAIR_SET_MAC)
+ prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
queue->queue_elem[queue->queue_head].mbuf = mbufs[i];
queue->queue_head = (queue->queue_head + 1) & task->queue_mask;
break;
ret+= task->base.tx_pkt(&task->base, mbufs + i, 1, out);
plog_warn("Unexpectdly dropping packets\n");
}
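+ /* Duplication relies on rte_pktmbuf_copy(), which DPDK gained in
+ * 19.11. On a hit, the slot is replaced by a fresh copy of the packet
+ * (the original was already enqueued or sent above) and i steps back
+ * so the copy is processed on the next iteration. */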
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+ if (rand_r(&task->seed) <= task->tresh_duplicate) {
+ mbufs[i] = rte_pktmbuf_copy(mbufs[i], mbufs[i]->pool, 0, UINT32_MAX);
+ if (mbufs[i] == NULL) {
+ plog_err("Failed to duplicate mbuf\n");
+ } else
+ i = i - 1;
+ }
+#endif
}
struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
struct queue *queue = &task->buffer[task->last_idx];
while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
- out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+ out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf;
PREFETCH0(new_mbufs[pkt_idx]);
PREFETCH0(&new_mbufs[pkt_idx]->cacheline1);
uint64_t delay_us = 0;
task->seed = rte_rdtsc();
- if (targ->probability == 0)
- targ->probability = 1000000;
- task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000;
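+ /* The targ->probability_* fields appear to be expressed per million
+ * (1000000 == always), matching the 10000x scaling applied by the
+ * runtime setters above. */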
+ task->tresh_no_drop = ((uint64_t) RAND_MAX) * targ->probability_no_drop / 1000000;
+ task->tresh_delay = ((uint64_t) RAND_MAX) * targ->probability_delay / 1000000;
+ task->tresh_duplicate = ((uint64_t) RAND_MAX) * targ->probability_duplicate / 1000000;
if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) {
tbase->handle_bulk = handle_bulk_random_drop;
task->queue_tail = 0;
} else if (targ->random_delay_us) {
size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
- plog_info("Allocating %zd bytes\n", size);
+ plog_info("\t\tAllocating %zd bytes\n", size);
task->buffer = prox_zmalloc(size, socket_id);
PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
- plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+ plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id);
}
}
random_init_seed(&task->state);
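+ /* When the task transmits to a port, default the rewritten source MAC
+ * to that port's own address and enable IMPAIR_SET_MAC. */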
+ if (targ->nb_txports) {
+ memcpy(&task->src_mac[0], &prox_port_cfg[tbase->tx_params_hw.tx_port_queue[0].port].eth_addr, sizeof(prox_rte_ether_addr));
+ task->flags = IMPAIR_SET_MAC;
+ } else {
+ task->flags = 0;
+ }
}
static struct task_init tinit = {