Added support for reporting packet (mis)order. 27/70527/3
author    Xavier Simonart <xavier.simonart@intel.com>
          Thu, 2 Jul 2020 08:02:40 +0000 (10:02 +0200)
committer Xavier Simonart <xavier.simonart@intel.com>
          Mon, 21 Sep 2020 07:56:18 +0000 (09:56 +0200)
The "Latency" screen has been updated with 3 columns:
- mis-ordered
  Count the number of mis-ordered packets.
- extent:
  Gives an indication of how mis-ordered the packets are.
  Receiving packet "x - 5" after receiving packet "x" will
  cause an extent of 5.
- duplicate:
  Count number of duplicate packets.
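
The mis-order counters are tracked per generator by comparing each
received packet id with the previously received one, as done by the
new lat_test_check_ordering() in handle_lat.c. A minimal standalone
sketch of the same logic (hypothetical names, no PROX dependencies):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct order_stats { uint64_t mis_ordered, extent; };

    /* A packet is mis-ordered when its id is lower than the last id seen;
     * the distance to that last id is added to the extent. */
    static void check_ordering(struct order_stats *s, uint32_t *prev, uint32_t packet_id)
    {
            if (packet_id < *prev) {
                    s->mis_ordered++;
                    s->extent += *prev - packet_id;
            }
            *prev = packet_id;
    }

    int main(void)
    {
            /* Packet "x - 5" (5) received after packet "x" (10) => extent 5 */
            uint32_t ids[] = {8, 9, 10, 5, 11};
            struct order_stats s = {0, 0};
            uint32_t prev = 0;

            for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    check_ordering(&s, &prev, ids[i]);
            printf("mis-ordered=%" PRIu64 " extent=%" PRIu64 "\n", s.mis_ordered, s.extent);
            return 0;
    }

Running this prints "mis-ordered=1 extent=5" for the sequence above.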

The following commands have been added for the impair mode (usage
example below):
- proba no drop: replaces the former "probability" command, which is
  kept for backward compatibility.
  Percentage of forwarded packets, so 99.5 means 0.5% packet drop.
- proba delay:
  Percentage of delayed packets for the impair mode.
- proba duplicate:
  Percentage of duplicated packets.
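
For example, assuming the impair task runs on core 1, task 0 (the
percentages below are purely illustrative):

    proba no drop 1 0 99.5
    proba delay 1 0 50
    proba duplicate 1 0 0.1

The first command forwards 99.5% of the packets (0.5% drop), the
second delays 50% of the packets, and the last duplicates 0.1% of
them. Note that "proba duplicate" is only registered when built
against DPDK >= 19.11, as it relies on rte_pktmbuf_copy().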

The same parameters are supported within the config files (see the
example below):
- proba no drop
- proba delay
- proba duplicate
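
A minimal core-section sketch using these keys; the section layout,
port names and percentage values are illustrative assumptions, only
the "proba ..." keys come from this change:

    [core 1]
    name=impair
    task=0
    mode=impair
    rx port=p0
    tx port=p1
    proba no drop=99.5
    proba delay=50
    proba duplicate=0.1

As with the runtime command, "proba duplicate" is only parsed when
built against DPDK >= 19.11.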

Note: it is recommended to use the signature when measuring packet
mis-order, as otherwise unexpected packets would cause miscounts.

Change-Id: I037f606f264d6e2bd7f123df5ed57ab7df8386d7
Signed-off-by: Xavier Simonart <xavier.simonart@intel.com>
18 files changed:
VNFs/DPPD-PROX/cmd_parser.c
VNFs/DPPD-PROX/commands.c
VNFs/DPPD-PROX/defaults.c
VNFs/DPPD-PROX/display_latency.c
VNFs/DPPD-PROX/eld.h
VNFs/DPPD-PROX/handle_gen.c
VNFs/DPPD-PROX/handle_impair.c
VNFs/DPPD-PROX/handle_impair.h
VNFs/DPPD-PROX/handle_lat.c
VNFs/DPPD-PROX/handle_lat.h
VNFs/DPPD-PROX/handle_master.c
VNFs/DPPD-PROX/main.c
VNFs/DPPD-PROX/packet_utils.c
VNFs/DPPD-PROX/prox_args.c
VNFs/DPPD-PROX/stats_latency.c
VNFs/DPPD-PROX/stats_latency.h
VNFs/DPPD-PROX/task_init.c
VNFs/DPPD-PROX/task_init.h

VNFs/DPPD-PROX/cmd_parser.c
index 2d3b570..51a71f4 100644
@@ -398,16 +398,16 @@ static int parse_cmd_count(const char *str, struct input *input)
        return 0;
 }
 
-static int parse_cmd_set_probability(const char *str, struct input *input)
+static int parse_cmd_set_proba_no_drop(const char *str, struct input *input)
 {
        unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
-       float probability;
+       float proba_no_drop;
 
        if (parse_cores_task(str, lcores, &task_id, &nb_cores))
                return -1;
        if (!(str = strchr_skip_twice(str, ' ')))
                return -1;
-       if (sscanf(str, "%f", &probability) != 1)
+       if (sscanf(str, "%f", &proba_no_drop) != 1)
                return -1;
 
        if (cores_task_are_valid(lcores, task_id, nb_cores)) {
@@ -417,7 +417,59 @@ static int parse_cmd_set_probability(const char *str, struct input *input)
                                plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
                        } else {
                                struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
-                               task_impair_set_proba(tbase, probability);
+                               task_impair_set_proba_no_drop(tbase, proba_no_drop);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int parse_cmd_set_proba_delay(const char *str, struct input *input)
+{
+       unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+       float proba_delay;
+
+       if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+               return -1;
+       if (!(str = strchr_skip_twice(str, ' ')))
+               return -1;
+       if (sscanf(str, "%f", &proba_delay) != 1)
+               return -1;
+
+       if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+               for (unsigned int i = 0; i < nb_cores; i++) {
+                       lcore_id = lcores[i];
+                       if (!task_is_mode(lcore_id, task_id, "impair")) {
+                               plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+                       } else {
+                               struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+                               task_impair_set_proba_delay(tbase, proba_delay);
+                       }
+               }
+       }
+       return 0;
+}
+
+static int parse_cmd_set_proba_duplicate(const char *str, struct input *input)
+{
+       unsigned lcores[RTE_MAX_LCORE], lcore_id, task_id, nb_cores;
+       float proba_duplicate;
+
+       if (parse_cores_task(str, lcores, &task_id, &nb_cores))
+               return -1;
+       if (!(str = strchr_skip_twice(str, ' ')))
+               return -1;
+       if (sscanf(str, "%f", &proba_duplicate) != 1)
+               return -1;
+
+       if (cores_task_are_valid(lcores, task_id, nb_cores)) {
+               for (unsigned int i = 0; i < nb_cores; i++) {
+                       lcore_id = lcores[i];
+                       if (!task_is_mode(lcore_id, task_id, "impair")) {
+                               plog_err("Core %u task %u is not impairing packets\n", lcore_id, task_id);
+                       } else {
+                               struct task_base *tbase = lcore_cfg[lcore_id].tasks_all[task_id];
+                               task_impair_set_proba_duplicate(tbase, proba_duplicate);
                        }
                }
        }
@@ -2240,7 +2292,12 @@ static struct cmd_str cmd_strings[] = {
        {"cgnat dump private hash", "<core id> <task id>", "Dump cgnat private hash table", parse_cmd_cgnat_private_hash},
        {"delay_us", "<core_id> <task_id> <delay_us>", "Set the delay in usec for the impair mode to <delay_us>", parse_cmd_delay_us},
        {"random delay_us", "<core_id> <task_id> <random delay_us>", "Set the delay in usec for the impair mode to <random delay_us>", parse_cmd_random_delay_us},
-       {"probability", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_probability},
+       {"probability", "<core_id> <task_id> <probability>", "Old - Use <proba no drop> instead. Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop}, // old - backward compatibility
+       {"proba no drop", "<core_id> <task_id> <probability>", "Set the percent of forwarded packets for the impair mode", parse_cmd_set_proba_no_drop},
+       {"proba delay", "<core_id> <task_id> <probability>", "Set the percent of delayed packets for the impair mode", parse_cmd_set_proba_delay},
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+       {"proba duplicate", "<core_id> <task_id> <probability>", "Set the percent of duplicate packets for the impair mode", parse_cmd_set_proba_duplicate},
+#endif
        {"version", "", "Show version", parse_cmd_version},
        {"join igmp", "<core_id> <task_id> <ip>", "Send igmp membership report for group <ip>", parse_cmd_join_igmp},
        {"leave igmp", "<core_id> <task_id>", "Send igmp leave group", parse_cmd_leave_igmp},
VNFs/DPPD-PROX/commands.c
index 32b974c..1b406e4 100644
@@ -113,7 +113,7 @@ static inline int wait_command_handled(struct lcore_cfg *lconf)
 static inline void start_l3(struct task_args *targ)
 {
        if (!task_is_master(targ)) {
-               if ((targ->nb_txrings != 0) || (targ->nb_txports != 0)) {
+               if ((targ->nb_txports != 0)) {
                        if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP))
                                task_start_l3(targ->tbase, targ);
                }
VNFs/DPPD-PROX/defaults.c
index 8dd29da..a2becb0 100644
@@ -187,6 +187,8 @@ void set_task_defaults(struct prox_cfg* prox_cfg, struct lcore_cfg* lcore_cfg_in
 
                        targ->runtime_flags |= TASK_TX_CRC;
                        targ->accuracy_limit_nsec = 5000;
+                       targ->probability_delay = 1000000;
+                       targ->probability_no_drop = 1000000;
                }
        }
 }
VNFs/DPPD-PROX/display_latency.c
index 04382e4..f43dd69 100644
@@ -26,6 +26,9 @@ static struct display_column *stddev_col;
 static struct display_column *accuracy_limit_col;
 static struct display_column *used_col;
 static struct display_column *lost_col;
+static struct display_column *mis_ordered_col;
+static struct display_column *extent_col;
+static struct display_column *duplicate_col;
 static struct display_page display_page_latency;
 
 static void display_latency_draw_frame(struct screen_state *screen_state)
@@ -68,12 +71,18 @@ static void display_latency_draw_frame(struct screen_state *screen_state)
        used_col = display_table_add_col(acc);
        display_column_init(used_col, "Used Packets (%)", 16);
        accuracy_limit_col = display_table_add_col(acc);
-       display_column_init(accuracy_limit_col, "limit (us)", 16);
+       display_column_init(accuracy_limit_col, "limit (us)", 12);
 
        display_table_init(other, "Other");
 
        lost_col = display_table_add_col(other);
-       display_column_init(lost_col, "Lost Packets", 16);
+       display_column_init(lost_col, "Lost", 12);
+       mis_ordered_col = display_table_add_col(other);
+       display_column_init(mis_ordered_col, "mis-ordered", 12);
+       extent_col = display_table_add_col(other);
+       display_column_init(extent_col, "extent", 12);
+       duplicate_col = display_table_add_col(other);
+       display_column_init(duplicate_col, "duplicate", 12);
 
        display_page_draw_frame(&display_page_latency, n_latency);
 
@@ -117,8 +126,11 @@ static void display_stats_latency_entry(int row, struct stats_latency *stats_lat
        }
 
        display_column_print(accuracy_limit_col, row, "%s", print_time_unit_usec(dst, &accuracy_limit));
-       display_column_print(lost_col, row, "%16"PRIu64"", stats_latency->lost_packets);
+       display_column_print(lost_col, row, "%12"PRIu64"", stats_latency->lost_packets);
        display_column_print(used_col, row, "%3u.%06u", used / AFTER_POINT, used % AFTER_POINT);
+       display_column_print(mis_ordered_col, row, "%12"PRIu64"", stats_latency->mis_ordered);
+       display_column_print(extent_col, row, "%12"PRIu64"", stats_latency->extent);
+       display_column_print(duplicate_col, row, "%12"PRIu64"", stats_latency->duplicate);
 }
 
 static void display_latency_draw_stats(struct screen_state *screen_state)
VNFs/DPPD-PROX/eld.h
index b5de59d..2731beb 100644
@@ -76,7 +76,10 @@ static uint32_t early_loss_detect_add(struct early_loss_detect *eld, uint32_t pa
        old_queue_id = eld->entries[queue_pos];
        eld->entries[queue_pos] = packet_index >> PACKET_QUEUE_BITS;
 
-       return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+       if (eld->entries[queue_pos] != old_queue_id)
+               return (eld->entries[queue_pos] - old_queue_id - 1) & QUEUE_ID_MASK;
+       else
+               return 0;
 }
 
 #endif /* _ELD_H_ */
VNFs/DPPD-PROX/handle_gen.c
index adcabd7..81175cb 100644
@@ -1254,7 +1254,7 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1
        uint32_t mbuf_size = TX_MBUF_SIZE;
        if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
                mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
-       plog_info("\tCreating mempool with name '%s'\n", name);
+       plog_info("\t\tCreating mempool with name '%s'\n", name);
        ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
                                 targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
                                 rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
@@ -1262,7 +1262,7 @@ static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint1
        PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
                   sock_id, targ->nb_mbuf - 1);
 
-        plog_info("\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
+        plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
                   targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
 
        return ret;
@@ -1552,7 +1552,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
        PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n");
 
        task->generator_id = targ->generator_id;
-       plog_info("\tGenerator id = %d\n", task->generator_id);
+       plog_info("\t\tGenerator id = %d\n", task->generator_id);
 
        // Allocate array holding bytes to tsc for supported frame sizes
        task->bytes_to_tsc = prox_zmalloc(task->max_frame_size * MAX_PKT_BURST * sizeof(task->bytes_to_tsc[0]), task->socket_id);
@@ -1564,7 +1564,7 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
        uint64_t bytes_per_hz = UINT64_MAX;
        if ((task->port) && (task->port->max_link_speed != UINT32_MAX)) {
                bytes_per_hz = task->port->max_link_speed * 125000L;
-               plog_info("\tPort %u: max link speed is %ld Mbps\n",
+               plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
                        (uint8_t)(task->port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
        }
        // There are cases where hz estimate might be slighly over-estimated
@@ -1582,10 +1582,10 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
                task->imix_pkt_sizes[i] = targ->imix_pkt_sizes[i];
        }
        if (!strcmp(targ->pcap_file, "")) {
-               plog_info("\tUsing inline definition of a packet\n");
+               plog_info("\t\tUsing inline definition of a packet\n");
                task_init_gen_load_pkt_inline(task, targ);
        } else {
-               plog_info("Loading from pcap %s\n", targ->pcap_file);
+               plog_info("\t\tLoading from pcap %s\n", targ->pcap_file);
                task_init_gen_load_pcap(task, targ);
        }
 
VNFs/DPPD-PROX/handle_impair.c
index 3896b70..a147d44 100644
@@ -55,7 +55,9 @@ struct task_impair {
        unsigned queue_head;
        unsigned queue_tail;
        unsigned queue_mask;
-       int tresh;
+       int tresh_no_drop;
+       int tresh_duplicate;
+       int tresh_delay;
        unsigned int seed;
        struct random state;
        uint64_t last_idx;
@@ -72,10 +74,23 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
 static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
 static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mbufs, uint16_t n_pkts);
 
-void task_impair_set_proba(struct task_base *tbase, float proba)
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba_no_drop)
 {
        struct task_impair *task = (struct task_impair *)tbase;
-       task->tresh = ((uint64_t) RAND_MAX) * (uint32_t)(proba * 10000) / 1000000;
+       task->tresh_no_drop = ((uint64_t) RAND_MAX) * (uint32_t)(proba_no_drop * 10000) / 1000000;
+}
+
+void task_impair_set_proba_delay(struct task_base *tbase, float proba_delay)
+{
+       struct task_impair *task = (struct task_impair *)tbase;
+       task->tresh_delay = ((uint64_t) RAND_MAX) * (uint32_t)(proba_delay * 10000) / 1000000;
+       task->flags |= IMPAIR_NEED_UPDATE;
+}
+
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba_dup)
+{
+       struct task_impair *task = (struct task_impair *)tbase;
+       task->tresh_duplicate = ((uint64_t) RAND_MAX) * (uint32_t)(proba_dup * 10000) / 1000000;
 }
 
 void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us)
@@ -118,7 +133,7 @@ static void task_impair_update(struct task_base *tbase)
                        uint16_t idx = 0;
                        while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
                                if (task->queue[task->queue_tail].tsc <= now) {
-                                       out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                                       out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                                        new_mbufs[idx++] = task->queue[task->queue_tail].mbuf;
                                        task->queue_tail = (task->queue_tail + 1) & task->queue_mask;
                                }
@@ -140,7 +155,7 @@ static void task_impair_update(struct task_base *tbase)
                        while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
                                struct queue *queue = &task->buffer[task->last_idx];
                                while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
-                                       out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                                       out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                                        new_mbufs[pkt_idx++] = queue->queue_elem[queue->queue_tail].mbuf;
                                        queue->queue_tail = (queue->queue_tail + 1) & task->queue_mask;
                                }
@@ -175,10 +190,10 @@ static void task_impair_update(struct task_base *tbase)
                }
        } else if (task->random_delay_us) {
                size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
-               plog_info("Allocating %zd bytes\n", size);
+               plog_info("\t\tAllocating %zd bytes\n", size);
                task->buffer = prox_zmalloc(size, task->socket_id);
                PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
-               plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+               plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
 
                for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
                        task->buffer[i].queue_elem = prox_zmalloc(mem_size, task->socket_id);
@@ -204,11 +219,11 @@ static int handle_bulk_random_drop(struct task_base *tbase, struct rte_mbuf **mb
        if (task->flags & IMPAIR_SET_MAC) {
                for (uint16_t i = 0; i < n_pkts; ++i) {
                        prox_rte_ether_addr_copy((prox_rte_ether_addr *)&task->src_mac[0], &hdr[i]->s_addr);
-                       out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                       out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                }
        } else {
                for (uint16_t i = 0; i < n_pkts; ++i) {
-                       out[i] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                       out[i] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                }
        }
        ret = task->base.tx_pkt(&task->base, mbufs, n_pkts, out);
@@ -268,10 +283,10 @@ static int handle_bulk_impair(struct task_base *tbase, struct rte_mbuf **mbufs,
        struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
        uint16_t idx = 0;
 
-       if (task->tresh != RAND_MAX) {
+       if (task->tresh_no_drop != RAND_MAX) {
                while (idx < MAX_PKT_BURST && task->queue_tail != task->queue_head) {
                        if (task->queue[task->queue_tail].tsc <= now) {
-                               out[idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                               out[idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                                new_mbufs[idx] = task->queue[task->queue_tail].mbuf;
                                PREFETCH0(new_mbufs[idx]);
                                PREFETCH0(&new_mbufs[idx]->cacheline1);
@@ -346,7 +361,10 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
        }
 
        for (i = 0; i < n_pkts; ++i) {
-               packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+               if (rand_r(&task->seed) <= task->tresh_delay)
+                       packet_time = now + random_delay(&task->state, task->delay_time, task->delay_time_mask);
+               else
+                       packet_time = now;
                idx = (packet_time >> DELAY_ACCURACY) & DELAY_MAX_MASK;
                while (idx != ((now_idx - 1) & DELAY_MAX_MASK)) {
                        struct queue *queue = &task->buffer[idx];
@@ -366,6 +384,15 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
                        ret+= task->base.tx_pkt(&task->base, mbufs + i, 1, out);
                        plog_warn("Unexpectdly dropping packets\n");
                }
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+               if (rand_r(&task->seed) <= task->tresh_duplicate) {
+                       mbufs[i] = rte_pktmbuf_copy(mbufs[i], mbufs[i]->pool, 0, UINT32_MAX);
+                       if (mbufs[i] == NULL) {
+                               plog_err("Failed to duplicate mbuf\n");
+                       } else
+                               i = i - 1;
+               }
+#endif
        }
 
        struct rte_mbuf *new_mbufs[MAX_PKT_BURST];
@@ -374,7 +401,7 @@ static int handle_bulk_impair_random(struct task_base *tbase, struct rte_mbuf **
        while ((pkt_idx < MAX_PKT_BURST) && (task->last_idx != ((now_idx - 1) & DELAY_MAX_MASK))) {
                struct queue *queue = &task->buffer[task->last_idx];
                while ((pkt_idx < MAX_PKT_BURST) && (queue->queue_tail != queue->queue_head)) {
-                       out[pkt_idx] = rand_r(&task->seed) <= task->tresh? 0 : OUT_DISCARD;
+                       out[pkt_idx] = rand_r(&task->seed) <= task->tresh_no_drop? 0 : OUT_DISCARD;
                        new_mbufs[pkt_idx] = queue->queue_elem[queue->queue_tail].mbuf;
                        PREFETCH0(new_mbufs[pkt_idx]);
                        PREFETCH0(&new_mbufs[pkt_idx]->cacheline1);
@@ -399,10 +426,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
        uint64_t delay_us = 0;
 
        task->seed = rte_rdtsc();
-       if (targ->probability == 0)
-               targ->probability = 1000000;
 
-       task->tresh = ((uint64_t) RAND_MAX) * targ->probability / 1000000;
+       task->tresh_no_drop = ((uint64_t) RAND_MAX) * targ->probability_no_drop / 1000000;
+       task->tresh_delay = ((uint64_t) RAND_MAX) * targ->probability_delay / 1000000;
+       task->tresh_duplicate = ((uint64_t) RAND_MAX) * targ->probability_duplicate / 1000000;
 
        if ((targ->delay_us == 0) && (targ->random_delay_us == 0)) {
                tbase->handle_bulk = handle_bulk_random_drop;
@@ -438,10 +465,10 @@ static void init_task(struct task_base *tbase, struct task_args *targ)
                task->queue_tail = 0;
        } else if (targ->random_delay_us) {
                size_t size = (DELAY_MAX_MASK + 1) * sizeof(struct queue);
-               plog_info("Allocating %zd bytes\n", size);
+               plog_info("\t\tAllocating %zd bytes\n", size);
                task->buffer = prox_zmalloc(size, socket_id);
                PROX_PANIC(task->buffer == NULL, "Not enough memory to allocate buffer\n");
-               plog_info("Allocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
+               plog_info("\t\tAllocating %d x %zd bytes\n", DELAY_MAX_MASK + 1, mem_size);
 
                for (int i = 0; i < DELAY_MAX_MASK + 1; i++) {
                        task->buffer[i].queue_elem = prox_zmalloc(mem_size, socket_id);
VNFs/DPPD-PROX/handle_impair.h
index 162213e..c2d10ab 100644
@@ -18,6 +18,8 @@
 #define _HANDLE_IMPAIR_H_
 
 void task_impair_set_delay_us(struct task_base *tbase, uint32_t delay_us, uint32_t random_delay_us);
-void task_impair_set_proba(struct task_base *tbase, float proba);
+void task_impair_set_proba_no_drop(struct task_base *tbase, float proba);
+void task_impair_set_proba_delay(struct task_base *tbase, float proba);
+void task_impair_set_proba_duplicate(struct task_base *tbase, float proba);
 
 #endif /* _HANDLE_IMPAIR_H_ */
VNFs/DPPD-PROX/handle_lat.c
index ef4da31..0c0e7b5 100644
@@ -115,6 +115,7 @@ struct task_lat {
        FILE *fp_tx;
        struct prox_port_cfg *port;
        uint64_t *bytes_to_tsc;
+       uint64_t *previous_packet;
 };
 /* This function calculate the difference between rx and tx_time
  * Both values are uint32_t (see handle_lat_bulk)
@@ -440,6 +441,17 @@ static uint32_t task_lat_early_loss_detect(struct task_lat *task, uint32_t packe
        return early_loss_detect_add(eld, packet_id);
 }
 
+static void lat_test_check_duplicate(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+       struct early_loss_detect *eld = &task->eld[generator_id];
+       uint32_t old_queue_id, queue_pos;
+
+       queue_pos = packet_id & PACKET_QUEUE_MASK;
+       old_queue_id = eld->entries[queue_pos];
+       if ((packet_id >> PACKET_QUEUE_BITS) == old_queue_id)
+               lat_test->duplicate++;
+}
+
 static uint64_t tsc_extrapolate_backward(struct task_lat *task, uint64_t tsc_from, uint64_t bytes, uint64_t tsc_minimum)
 {
 #ifdef NO_LAT_EXTRAPOLATION
@@ -462,6 +474,15 @@ static void lat_test_histogram_add(struct lat_test *lat_test, uint64_t lat_tsc)
        lat_test->buckets[bucket_id]++;
 }
 
+static void lat_test_check_ordering(struct task_lat *task, struct lat_test *lat_test, uint32_t packet_id, uint8_t generator_id)
+{
+       if (packet_id < task->previous_packet[generator_id]) {
+               lat_test->mis_ordered++;
+               lat_test->extent += task->previous_packet[generator_id] - packet_id;
+       }
+       task->previous_packet[generator_id] = packet_id;
+}
+
 static void lat_test_add_lost(struct lat_test *lat_test, uint64_t lost_packets)
 {
        lat_test->lost_packets += lost_packets;
@@ -613,7 +634,8 @@ static int handle_lat_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
                                // Skip unexpected packet
                                continue;
                        }
-
+                       lat_test_check_ordering(task, task->lat_test, packet_id, generator_id);
+                       lat_test_check_duplicate(task, task->lat_test, packet_id, generator_id);
                        lat_test_add_lost(task->lat_test, task_lat_early_loss_detect(task, packet_id, generator_id));
                } else {
                        generator_id = 0;
@@ -702,7 +724,7 @@ static void task_init_generator_count(struct task_lat *task)
                plog_info("\tNo generators found, hard-coding to %u generators\n", task->generator_count);
        } else
                task->generator_count = *generator_count;
-       plog_info("\tLatency using %u generators\n", task->generator_count);
+       plog_info("\t\tLatency using %u generators\n", task->generator_count);
 }
 
 static void task_lat_init_eld(struct task_lat *task, uint8_t socket_id)
@@ -786,6 +808,8 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
         if (task->unique_id_pos) {
                task_lat_init_eld(task, socket_id);
                task_lat_reset_eld(task);
+               task->previous_packet = prox_zmalloc(sizeof(task->previous_packet) * task->generator_count , socket_id);
+               PROX_PANIC(task->previous_packet == NULL, "Failed to allocate array for storing previous packet\n");
         }
        task->lat_test = &task->lt[task->using_lt];
 
@@ -803,7 +827,7 @@ static void init_task_lat(struct task_base *tbase, struct task_args *targ)
                // It can be UINT32_MAX (virtual devices or not supported by DPDK < 16.04)
                if (port->max_link_speed != UINT32_MAX) {
                        bytes_per_hz = port->max_link_speed * 125000L;
-                       plog_info("\tPort %u: max link speed is %ld Mbps\n",
+                       plog_info("\t\tPort %u: max link speed is %ld Mbps\n",
                                (uint8_t)(port - prox_port_cfg), 8 * bytes_per_hz / 1000000);
                }
        }
VNFs/DPPD-PROX/handle_lat.h
index a80afc9..475682c 100644
@@ -52,6 +52,9 @@ struct lat_test {
        uint64_t buckets[LAT_BUCKET_COUNT];
        uint64_t bucket_size;
        uint64_t lost_packets;
+       uint64_t mis_ordered;
+       uint64_t extent;
+       uint64_t duplicate;
 };
 
 static struct time_unit lat_test_get_accuracy_limit(struct lat_test *lat_test)
@@ -157,6 +160,9 @@ static void lat_test_combine(struct lat_test *dst, struct lat_test *src)
        if (src->accuracy_limit_tsc > dst->accuracy_limit_tsc)
                dst->accuracy_limit_tsc = src->accuracy_limit_tsc;
        dst->lost_packets += src->lost_packets;
+       dst->mis_ordered += src->mis_ordered;
+       dst->extent += src->extent;
+       dst->duplicate += src->duplicate;
 
 #ifdef LATENCY_HISTOGRAM
        _lat_test_histogram_combine(dst, src);
@@ -178,6 +184,9 @@ static void lat_test_reset(struct lat_test *lat_test)
        lat_test->accuracy_limit_tsc = 0;
 
        lat_test->lost_packets = 0;
+       lat_test->mis_ordered = 0;
+       lat_test->extent = 0;
+       lat_test->duplicate = 0;
 
        memset(lat_test->buckets, 0, sizeof(lat_test->buckets));
 }
VNFs/DPPD-PROX/handle_master.c
index 1026a17..b0dbc9c 100644
@@ -979,7 +979,7 @@ void init_ctrl_plane(struct task_base *tbase)
                rte_socket_id(), 0);
        PROX_PANIC(ret == NULL, "Failed to allocate ARP memory pool on socket %u with %u elements\n",
                rte_socket_id(), NB_ARP_MBUF);
-       plog_info("\t\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
+       plog_info("\tMempool %p (%s) size = %u * %u cache %u, socket %d\n", ret, name, NB_ARP_MBUF,
                ARP_MBUF_SIZE, NB_CACHE_ARP_MBUF, rte_socket_id());
        tbase->l3.arp_nd_pool = ret;
 }
VNFs/DPPD-PROX/main.c
index a863ffb..b9fe80a 100644
@@ -990,10 +990,10 @@ static void setup_all_task_structs(void)
 
        while(prox_core_next(&lcore_id, 1) == 0) {
                lconf = &lcore_cfg[lcore_id];
-               plog_info("\tInitializing struct for core %d with %d task\n", lcore_id, lconf->n_tasks_all);
+               plog_info("\t*** Initializing core %d (%d task) ***\n", lcore_id, lconf->n_tasks_all);
                for (uint8_t task_id = 0; task_id < lconf->n_tasks_all; ++task_id) {
                        if (!task_is_master(&lconf->targs[task_id])) {
-                               plog_info("\tInitializing struct for core %d task %d\n", lcore_id, task_id);
+                               plog_info("\t\tInitializing struct for core %d task %d\n", lcore_id, task_id);
                                lconf->targs[task_id].tmaster = tmaster;
                                lconf->tasks_all[task_id] = init_task_struct(&lconf->targs[task_id]);
                        }
VNFs/DPPD-PROX/packet_utils.c
index 9507d24..70d5c02 100644
@@ -485,21 +485,21 @@ void task_init_l3(struct task_base *tbase, struct task_args *targ)
                .hash_func_init_val = 0,
        };
        if (targ->flags & TASK_ARG_L3) {
-               plog_info("\tInitializing L3 (IPv4)\n");
+               plog_info("\t\tInitializing L3 (IPv4)\n");
                tbase->l3.ip_hash = rte_hash_create(&hash_params);
                PROX_PANIC(tbase->l3.ip_hash == NULL, "Failed to set up ip hash table\n");
                hash_name[0]++;
        }
 
        if (targ->flags & TASK_ARG_NDP) {
-               plog_info("\tInitializing NDP (IPv6)\n");
+               plog_info("\t\tInitializing NDP (IPv6)\n");
                hash_params.key_len = sizeof(struct ipv6_addr);
                tbase->l3.ip6_hash = rte_hash_create(&hash_params);
                PROX_PANIC(tbase->l3.ip6_hash == NULL, "Failed to set up ip hash table\n");
        }
        tbase->l3.arp_table = (struct arp_table *)prox_zmalloc(n_entries * sizeof(struct arp_table), socket_id);
        PROX_PANIC(tbase->l3.arp_table == NULL, "Failed to allocate memory for %u entries in arp/ndp table\n", n_entries);
-       plog_info("\tarp/ndp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
+       plog_info("\t\tarp/ndp table, with %d entries of size %ld\n", n_entries, sizeof(struct l3_base));
 
        targ->lconf->ctrl_func_p[targ->task] = handle_ctrl_plane_pkts;
        targ->lconf->ctrl_timeout = freq_to_tsc(targ->ctrl_freq);
VNFs/DPPD-PROX/prox_args.c
index 5af1931..3e3e41b 100644
@@ -1141,7 +1141,7 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
        if (STR_EQ(str, "packet id pos")) {
                return parse_int(&targ->packet_id_pos, pkey);
        }
-       if (STR_EQ(str, "probability")) {
+       if (STR_EQ(str, "probability")) { // old - use "probability no drop" instead
                float probability;
                int rc = parse_float(&probability, pkey);
                if (probability == 0) {
@@ -1151,9 +1151,44 @@ static int get_core_cfg(unsigned sindex, char *str, void *data)
                        set_errf("Probability must be < 100\n");
                        return -1;
                }
-               targ->probability = probability * 10000;
+               targ->probability_no_drop = probability * 10000;
                return rc;
        }
+       if (STR_EQ(str, "proba no drop")) {
+               float probability;
+               int rc = parse_float(&probability, pkey);
+               if (probability == 0) {
+                       set_errf("probability no drop must be != 0\n");
+                       return -1;
+               } else if (probability > 100.0) {
+                       set_errf("Probability must be < 100\n");
+                       return -1;
+               }
+               targ->probability_no_drop = probability * 10000;
+               return rc;
+       }
+       if (STR_EQ(str, "proba delay")) {
+               float probability;
+               int rc = parse_float(&probability, pkey);
+               if (probability > 100.0) {
+                       set_errf("Probability must be < 100\n");
+                       return -1;
+               }
+               targ->probability_delay = probability * 10000;
+               return rc;
+       }
+#if RTE_VERSION >= RTE_VERSION_NUM(19,11,0,0)
+       if (STR_EQ(str, "proba duplicate")) {
+               float probability;
+               int rc = parse_float(&probability, pkey);
+               if (probability > 100.0) {
+                       set_errf("probability duplicate must be < 100\n");
+                       return -1;
+               }
+               targ->probability_duplicate = probability * 10000;
+               return rc;
+       }
+#endif
        if (STR_EQ(str, "concur conn")) {
                return parse_int(&targ->n_concur_conn, pkey);
        }
VNFs/DPPD-PROX/stats_latency.c
index 58bad6f..5b2989d 100644
@@ -228,6 +228,9 @@ static void stats_latency_from_lat_test(struct stats_latency *dst, struct lat_te
        dst->tot_packets = src->tot_pkts;
        dst->tot_all_packets = src->tot_all_pkts;
        dst->lost_packets = src->lost_packets;
+       dst->mis_ordered = src->mis_ordered;
+       dst->extent = src->extent;
+       dst->duplicate = src->duplicate;
 }
 
 static void stats_latency_update_entry(struct stats_latency_manager_entry *entry)
VNFs/DPPD-PROX/stats_latency.h
index 32f3ba3..833bbff 100644
@@ -29,6 +29,9 @@ struct stats_latency {
 
        struct time_unit accuracy_limit;
        uint64_t         lost_packets;
+       uint64_t         mis_ordered;
+       uint64_t         extent;
+       uint64_t         duplicate;
        uint64_t         tot_packets;
        uint64_t         tot_all_packets;
 };
VNFs/DPPD-PROX/task_init.c
index fc12eae..9fc0562 100644
@@ -366,7 +366,7 @@ struct task_base *init_task_struct(struct task_args *targ)
        tbase->handle_bulk = t->handle;
 
        if (targ->flags & (TASK_ARG_L3|TASK_ARG_NDP)) {
-               plog_info("\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
+               plog_info("\t\tTask (%d,%d) configured in L3/NDP mode\n", targ->lconf->id, targ->id);
                tbase->l3.ctrl_plane_ring = targ->ctrl_plane_ring;
                if (targ->nb_txports != 0) {
                        tbase->aux->tx_pkt_l2 = tbase->tx_pkt;
VNFs/DPPD-PROX/task_init.h
index 33b912e..d56837d 100644
@@ -204,7 +204,9 @@ struct task_args {
        uint32_t               lat_enabled;
        uint32_t               pkt_size;
        uint8_t                pkt_inline[MAX_PKT_SIZE];
-       uint32_t               probability;
+       uint32_t               probability_no_drop;
+       uint32_t               probability_duplicate;
+       uint32_t               probability_delay;
        char                   nat_table[256];
        uint32_t               use_src;
        char                   route_table[256];