X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=VNFs%2FDPPD-PROX%2Fhandle_gen.c;h=000d01763b79d8bbb01f65e1c3e77c9246c0f2d3;hb=deae87b73684b3a7a96c64c918c0342a52262744;hp=c48b4c138f8551b3477de9e4bf654a0b243fa573;hpb=7aa26ff208ab6c33f38e49f6d9abea15e6697b8c;p=samplevnf.git

diff --git a/VNFs/DPPD-PROX/handle_gen.c b/VNFs/DPPD-PROX/handle_gen.c
index c48b4c13..000d0176 100644
--- a/VNFs/DPPD-PROX/handle_gen.c
+++ b/VNFs/DPPD-PROX/handle_gen.c
@@ -52,7 +52,7 @@ struct pkt_template {
 	uint16_t len;
 	uint16_t l2_len;
 	uint16_t l3_len;
-	uint8_t buf[ETHER_MAX_LEN];
+	uint8_t *buf;
 };
 
 #define MAX_TEMPLATE_INDEX 65536
@@ -97,6 +97,7 @@ struct task_gen {
 	uint32_t n_pkts; /* number of packets in pcap */
 	uint32_t pkt_idx; /* current packet from pcap */
 	uint32_t pkt_count; /* how many packets to generate */
+	uint32_t max_frame_size;
 	uint32_t runtime_flags;
 	uint16_t lat_pos;
 	uint16_t packet_id_pos;
@@ -122,6 +123,7 @@ struct task_gen {
 	struct ether_addr src_mac;
 	uint8_t flags;
 	uint8_t cksum_offload;
+	struct prox_port_cfg *port;
 } __rte_cache_aligned;
 
 static inline uint8_t ipv4_get_hdr_len(struct ipv4_hdr *ip)
@@ -640,6 +642,19 @@ static int handle_gen_bulk(struct task_base *tbase, struct rte_mbuf **mbufs, uin
 	int i, j;
 
+#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
+	// On more recent DPDK, we use the speed_capa of the port, not the negotiated speed.
+	// If the link is down, link_speed is 0.
+	if (unlikely(task->link_speed == 0)) {
+		if (task->port && task->port->link_speed != 0) {
+			task->link_speed = task->port->link_speed * 125000L;
+			plog_info("\tPort %u: link speed is %ld Mbps\n",
+				(uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
+		} else
+			return 0;
+	}
+#endif
+
 	task_gen_update_config(task);
 
 	if (task->pkt_count == 0) {
@@ -686,14 +701,17 @@ static void init_task_gen_seeds(struct task_gen *task)
 		random_init_seed(&task->rand[i].state);
 }
 
-static uint32_t pcap_count_pkts(pcap_t *handle)
+static uint32_t pcap_count_pkts(pcap_t *handle, uint32_t *max_frame_size)
 {
 	struct pcap_pkthdr header;
 	const uint8_t *buf;
 	uint32_t ret = 0;
+	*max_frame_size = 0;
 	long pkt1_fpos = ftell(pcap_file(handle));
 
 	while ((buf = pcap_next(handle, &header))) {
+		if (header.len > *max_frame_size)
+			*max_frame_size = header.len;
 		ret++;
 	}
 	int ret2 = fseek(pcap_file(handle), pkt1_fpos, SEEK_SET);
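
The reworked pcap_count_pkts() above makes a single pass over the capture to learn both the packet count and the largest frame (needed later to size template buffers and mbufs), then rewinds so the subsequent load pass starts from the first packet again. A minimal standalone sketch of the same pattern, using only stock libpcap calls; the file name is hypothetical:

```c
#include <pcap.h>
#include <stdint.h>
#include <stdio.h>

/* Scan a pcap file once, returning the packet count and the largest frame
 * length, then rewind the underlying FILE* so the next pcap_next() starts
 * from the first packet again. */
static uint32_t count_pkts(pcap_t *handle, uint32_t *max_frame_size)
{
	struct pcap_pkthdr header;
	uint32_t count = 0;

	*max_frame_size = 0;
	long first_pkt_pos = ftell(pcap_file(handle)); /* where packet data starts */

	while (pcap_next(handle, &header) != NULL) {
		if (header.len > *max_frame_size)
			*max_frame_size = header.len; /* header.len is the on-wire length */
		count++;
	}
	/* rewind to the first packet for the subsequent load pass */
	fseek(pcap_file(handle), first_pkt_pos, SEEK_SET);
	return count;
}

int main(void)
{
	char err[PCAP_ERRBUF_SIZE];
	uint32_t max_len;
	pcap_t *h = pcap_open_offline("traffic.pcap", err); /* hypothetical file name */
	if (h == NULL)
		return 1;
	printf("%u packets, largest frame %u bytes\n", count_pkts(h, &max_len), max_len);
	pcap_close(h);
	return 0;
}
```

Rewinding through pcap_file()/fseek() reuses the already-open handle instead of closing and reopening the capture between the counting pass and the load pass.
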
@@ -756,7 +774,7 @@ static int pcap_read_pkts(pcap_t *handle, const char *file_name, uint32_t n_pkts
 static int check_pkt_size(struct task_gen *task, uint32_t pkt_size, int do_panic)
 {
 	const uint16_t min_len = sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr);
-	const uint16_t max_len = ETHER_MAX_LEN - 4;
+	const uint16_t max_len = task->max_frame_size;
 
 	if (do_panic) {
 		PROX_PANIC(pkt_size == 0, "Invalid packet size length (no packet defined?)\n");
@@ -917,8 +935,6 @@ static void task_init_gen_load_pkt_inline(struct task_gen *task, struct task_arg
 {
 	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
 
-	if (targ->pkt_size > sizeof(task->pkt_template[0].buf))
-		targ->pkt_size = sizeof(task->pkt_template[0].buf);
 	task->n_pkts = 1;
 
 	size_t mem_size = task->n_pkts * sizeof(*task->pkt_template);
@@ -927,7 +943,17 @@
 
 	PROX_PANIC(task->pkt_template == NULL ||
 		   task->pkt_template_orig == NULL,
-		   "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
+		   "Failed to allocate %lu bytes (in huge pages) for packet template\n", mem_size);
+
+	task->pkt_template->buf = prox_zmalloc(task->max_frame_size, socket_id);
+	task->pkt_template_orig->buf = prox_zmalloc(task->max_frame_size, socket_id);
+	PROX_PANIC(task->pkt_template->buf == NULL ||
+		   task->pkt_template_orig->buf == NULL,
+		   "Failed to allocate %u bytes (in huge pages) for packet\n", task->max_frame_size);
+
+	PROX_PANIC(targ->pkt_size > task->max_frame_size,
+		   targ->pkt_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
+		   "pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
 
 	rte_memcpy(task->pkt_template_orig[0].buf, targ->pkt_inline, targ->pkt_size);
 	task->pkt_template_orig[0].len = targ->pkt_size;
@@ -940,11 +966,15 @@ static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *tar
 {
 	const int socket_id = rte_lcore_to_socket_id(targ->lconf->id);
 	char err[PCAP_ERRBUF_SIZE];
+	uint32_t max_frame_size;
 	pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
 	PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
 
-	task->n_pkts = pcap_count_pkts(handle);
+	task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
 	plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
+	PROX_PANIC(max_frame_size > task->max_frame_size,
+		max_frame_size > ETHER_MAX_LEN + 2 * PROX_VLAN_TAG_SIZE - 4 ?
+		"pkt_size too high and jumbo frames disabled" : "pkt_size > mtu");
 
 	if (targ->n_pkts)
 		task->n_pkts = RTE_MIN(task->n_pkts, targ->n_pkts);
@@ -957,24 +987,41 @@ static void task_init_gen_load_pcap(struct task_gen *task, struct task_args *tar
 		   task->pkt_template_orig == NULL,
 		   "Failed to allocate %lu bytes (in huge pages) for pcap file\n", mem_size);
 
+	for (uint i = 0; i < task->n_pkts; i++) {
+		task->pkt_template[i].buf = prox_zmalloc(max_frame_size, socket_id);
+		task->pkt_template_orig[i].buf = prox_zmalloc(max_frame_size, socket_id);
+
+		PROX_PANIC(task->pkt_template[i].buf == NULL ||
+			task->pkt_template_orig[i].buf == NULL,
+			"Failed to allocate %u bytes (in huge pages) for pcap file\n", task->max_frame_size);
+	}
+
 	pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->pkt_template_orig, NULL);
 	pcap_close(handle);
 	task_gen_reset_pkt_templates(task);
 }
 
-static struct rte_mempool *task_gen_create_mempool(struct task_args *targ)
+static struct rte_mempool *task_gen_create_mempool(struct task_args *targ, uint16_t max_frame_size)
 {
 	static char name[] = "gen_pool";
 	struct rte_mempool *ret;
 	const int sock_id = rte_lcore_to_socket_id(targ->lconf->id);
 
 	name[0]++;
-	ret = rte_mempool_create(name, targ->nb_mbuf - 1, MBUF_SIZE,
+	uint32_t mbuf_size = TX_MBUF_SIZE;
+	if (max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM > mbuf_size)
+		mbuf_size = max_frame_size + (unsigned)sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+	plog_info("\t\tCreating mempool with name '%s'\n", name);
+	ret = rte_mempool_create(name, targ->nb_mbuf - 1, mbuf_size,
 			targ->nb_cache_mbuf, sizeof(struct rte_pktmbuf_pool_private),
 			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, 0,
 			sock_id, 0);
 	PROX_PANIC(ret == NULL, "Failed to allocate dummy memory pool on socket %u with %u elements\n",
 		   sock_id, targ->nb_mbuf - 1);
+
+	plog_info("\t\tMempool %p size = %u * %u cache %u, socket %d\n", ret,
+		targ->nb_mbuf - 1, mbuf_size, targ->nb_cache_mbuf, sock_id);
+
 	return ret;
 }
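
task_gen_create_mempool() now grows the per-element mbuf size whenever the largest template would not fit in a single segment: an element must hold the frame itself plus the rte_mbuf header and the headroom DPDK reserves in front of the packet data. A hedged sketch of that sizing rule; TX_MBUF_SIZE is a PROX-internal default, stubbed here with an arbitrary value:

```c
#include <stdint.h>
#include <rte_mbuf.h>

#define DEFAULT_TX_MBUF_SIZE 2048 /* stand-in for PROX's TX_MBUF_SIZE */

/* Element size needed so a frame of max_frame_size bytes fits in one mbuf
 * segment: rte_mbuf metadata + reserved headroom + room for the frame. */
static uint32_t gen_mbuf_size(uint16_t max_frame_size)
{
	uint32_t needed = max_frame_size + (uint32_t)sizeof(struct rte_mbuf)
			+ RTE_PKTMBUF_HEADROOM;

	/* keep the default for ordinary frames, grow only for jumbo ones */
	return needed > DEFAULT_TX_MBUF_SIZE ? needed : DEFAULT_TX_MBUF_SIZE;
}
```

For a standard 1522-byte frame the default wins; for a 9000-byte MTU the element grows to 9022 bytes plus metadata and headroom, which is why the pool can only be created once max_frame_size is known.
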
@@ -1051,22 +1098,23 @@ static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
 {
 	struct task_gen_pcap *task = (struct task_gen_pcap *)tbase;
 	const uint32_t sockid = rte_lcore_to_socket_id(targ->lconf->id);
+	uint32_t max_frame_size;
 
 	task->loop = targ->loop;
 	task->pkt_idx = 0;
 	task->hz = rte_get_tsc_hz();
 
-	task->local_mbuf.mempool = task_gen_create_mempool(targ);
-
-	PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
-
 	char err[PCAP_ERRBUF_SIZE];
 	pcap_t *handle = pcap_open_offline(targ->pcap_file, err);
 	PROX_PANIC(handle == NULL, "Failed to open PCAP file: %s\n", err);
 
-	task->n_pkts = pcap_count_pkts(handle);
+	task->n_pkts = pcap_count_pkts(handle, &max_frame_size);
 	plogx_info("%u packets in pcap file '%s'\n", task->n_pkts, targ->pcap_file);
 
+	task->local_mbuf.mempool = task_gen_create_mempool(targ, max_frame_size);
+
+	PROX_PANIC(!strcmp(targ->pcap_file, ""), "No pcap file defined\n");
+
 	if (targ->n_pkts) {
 		plogx_info("Configured to load %u packets\n", targ->n_pkts);
 		if (task->n_pkts > targ->n_pkts)
@@ -1083,6 +1131,11 @@ static void init_task_gen_pcap(struct task_base *tbase, struct task_args *targ)
 	task->proto = (struct pkt_template *) mem;
 	task->proto_tsc = (uint64_t *)(mem + task->n_pkts * sizeof(*task->proto));
 
+	for (uint i = 0; i < task->n_pkts; i++) {
+		task->proto[i].buf = prox_zmalloc(max_frame_size, sockid);
+		PROX_PANIC(task->proto[i].buf == NULL, "Failed to allocate %u bytes (in huge pages) for pcap file\n", max_frame_size);
+	}
+
 	pcap_read_pkts(handle, targ->pcap_file, task->n_pkts, task->proto, task->proto_tsc);
 	pcap_close(handle);
 }
@@ -1144,6 +1197,28 @@ static void start(struct task_base *tbase)
 	if (tbase->l3.tmaster) {
 		register_all_ip_to_ctrl_plane(task);
 	}
+	if (task->port) {
+		// task->port->link_speed reports the link speed in Mbps, e.g. 40k for a 40 Gbps NIC.
+		// task->link_speed reports the link speed in bytes per second.
+#if RTE_VERSION < RTE_VERSION_NUM(16,4,0,0)
+		// It can be 0 if the link is down, and hence must be updated in the fast path.
+		task->link_speed = task->port->link_speed * 125000L;
+		if (task->link_speed)
+			plog_info("\tPort %u: link speed is %ld Mbps\n",
+				(uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
+		else
+			plog_info("\tPort %u: link speed is %ld Mbps - link might be down\n",
+				(uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
+#else
+		if (task->port->link_speed == UINT32_MAX)
+			task->link_speed = UINT64_MAX;
+		else {
+			task->link_speed = task->port->link_speed * 125000L;
+			plog_info("\tPort %u: link max speed is %ld Mbps\n",
+				(uint8_t)(task->port - prox_port_cfg), 8 * task->link_speed / 1000000);
+		}
+#endif
+	}
 	/* TODO
 	   Handle the case when two tasks transmit to the same port
 	   and one of them is stopped. In that case ARP (requests or replies)
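
Both branches of the #if above hinge on the same unit conversion: the port reports link_speed in Mbps, while the generator's token budget is in bytes per second, so 1 Mbps = 1000000 / 8 = 125000 bytes/s, and the log lines convert back with 8 * bytes / 1000000. A worked sketch of the conversion; the helper name is illustrative:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* 1 Mbps = 1 000 000 bits/s = 125 000 bytes/s */
static uint64_t mbps_to_bytes_per_sec(uint32_t link_speed_mbps)
{
	return (uint64_t)link_speed_mbps * 125000UL;
}

int main(void)
{
	uint32_t mbps = 40000;                      /* a 40 Gbps port reports 40k Mbps */
	uint64_t bps = mbps_to_bytes_per_sec(mbps); /* 5 000 000 000 bytes/s */

	/* inverse conversion, as used by the plog_info() calls above */
	printf("link speed is %" PRIu64 " Mbps\n", 8 * bps / 1000000); /* prints 40000 */
	return 0;
}
```
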
@@ -1167,7 +1242,8 @@ static void init_task_gen_early(struct task_args *targ)
 	uint8_t *generator_count = prox_sh_find_system("generator_count");
 
 	if (generator_count == NULL) {
-		generator_count = prox_zmalloc(sizeof(*generator_count), 0);
+		generator_count = prox_zmalloc(sizeof(*generator_count), rte_lcore_to_socket_id(targ->lconf->id));
+		PROX_PANIC(generator_count == NULL, "Failed to allocate generator count\n");
 		prox_sh_add_system("generator_count", generator_count);
 	}
 	targ->generator_id = *generator_count;
@@ -1180,7 +1256,17 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
 
 	task->packet_id_pos = targ->packet_id_pos;
 
-	task->local_mbuf.mempool = task_gen_create_mempool(targ);
+	struct prox_port_cfg *port = find_reachable_port(targ);
+	// TODO: check that all reachable ports have the same mtu...
+	if (port) {
+		task->cksum_offload = port->requested_tx_offload & (DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM);
+		task->port = port;
+		task->max_frame_size = port->mtu + ETHER_HDR_LEN + 2 * PROX_VLAN_TAG_SIZE;
+	} else {
+		// Not generating to any port...
+		task->max_frame_size = ETHER_MAX_LEN;
+	}
+	task->local_mbuf.mempool = task_gen_create_mempool(targ, task->max_frame_size);
 	PROX_PANIC(task->local_mbuf.mempool == NULL, "Failed to create mempool\n");
 	task->pkt_idx = 0;
 	task->hz = rte_get_tsc_hz();
@@ -1190,9 +1276,16 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
 	task->sig = targ->sig;
 	task->new_rate_bps = targ->rate_bps;
 
+	/*
+	 * For tokens, use 10 Gbps as the base rate.
+	 * Scripts can then use the speed command, with speed=100 meaning 10 Gbps and speed=400 meaning 40 Gbps.
+	 * A script can query the PROX "port info" command to find out the port link speed and know
+	 * at which rate to start. Note that virtio running on OVS returns 10 Gbps, so a script
+	 * probably also has to check the driver (as returned by the same "port info" command).
+	 */
 	struct token_time_cfg tt_cfg = token_time_cfg_create(1250000000, rte_get_tsc_hz(), -1);
-
 	token_time_init(&task->token_time, &tt_cfg);
+
 	init_task_gen_seeds(task);
 
 	task->min_bulk_size = targ->min_bulk_size;
@@ -1210,9 +1303,8 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
 	PROX_PANIC((task->lat_pos || task->accur_pos) && !task->lat_enabled, "lat not enabled by lat pos or accur pos configured\n");
 	task->generator_id = targ->generator_id;
+	plog_info("\tGenerator id = %d\n", task->generator_id);
 	task->link_speed = UINT64_MAX;
-	if (targ->nb_txrings == 0 && targ->nb_txports == 1)
-		task->link_speed = 1250000000;
 
 	if (!strcmp(targ->pcap_file, "")) {
 		plog_info("\tUsing inline definition of a packet\n");
@@ -1222,7 +1314,8 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
 		task_init_gen_load_pcap(task, targ);
 	}
 
-	if ((targ->flags & DSF_KEEP_SRC_MAC) == 0 && (targ->nb_txrings || targ->nb_txports)) {
+	PROX_PANIC(((targ->nb_txrings == 0) && (targ->nb_txports == 0)), "Gen mode requires a tx ring or a tx port\n");
+	if ((targ->flags & DSF_KEEP_SRC_MAC) == 0) {
 		uint8_t *src_addr = prox_port_cfg[tbase->tx_params_hw.tx_port_queue->port].eth_addr.addr_bytes;
 		for (uint32_t i = 0; i < task->n_pkts; ++i) {
 			rte_memcpy(&task->pkt_template[i].buf[6], src_addr, 6);
@@ -1233,11 +1326,6 @@ static void init_task_gen(struct task_base *tbase, struct task_args *targ)
 			PROX_PANIC(task_gen_add_rand(tbase, targ->rand_str[i], targ->rand_offset[i], UINT32_MAX),
 				   "Failed to add random\n");
 	}
-
-	struct prox_port_cfg *port = find_reachable_port(targ);
-	if (port) {
-		task->cksum_offload = port->capabilities.tx_offload_cksum;
-	}
 }
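
The max_frame_size picked in init_task_gen() above is the port MTU plus the L2 overhead the MTU does not account for: a 14-byte Ethernet header and up to two 4-byte VLAN tags (QinQ). A small sketch of that arithmetic, with local constants mirroring ETHER_HDR_LEN and PROX_VLAN_TAG_SIZE; the 4-byte CRC is excluded, since the NIC appends it:

```c
#include <stdint.h>
#include <assert.h>

#define L2_HDR_LEN    14 /* dst MAC + src MAC + ethertype, cf. ETHER_HDR_LEN */
#define VLAN_TAG_SIZE  4 /* one 802.1Q tag, cf. PROX_VLAN_TAG_SIZE */

/* Largest frame the generator may build for a port: the MTU covers the
 * payload above L2, so add the Ethernet header and up to two VLAN tags. */
static uint32_t max_frame_from_mtu(uint16_t mtu)
{
	return (uint32_t)mtu + L2_HDR_LEN + 2 * VLAN_TAG_SIZE;
}

int main(void)
{
	assert(max_frame_from_mtu(1500) == 1522); /* standard MTU, double-tagged */
	assert(max_frame_from_mtu(9000) == 9022); /* jumbo MTU */
	return 0;
}
```
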
 
 static struct task_init task_init_gen = {
@@ -1245,10 +1333,11 @@ static struct task_init task_init_gen = {
 	.init = init_task_gen,
 	.handle = handle_gen_bulk,
 	.start = start,
+	.early_init = init_task_gen_early,
 #ifdef SOFT_CRC
 	// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
 	// vector mode is used by DPDK, resulting (theoretically) in higher performance.
-	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
 #else
 	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
 #endif
@@ -1261,10 +1350,11 @@ static struct task_init task_init_gen_l3 = {
 	.init = init_task_gen,
 	.handle = handle_gen_bulk,
 	.start = start,
+	.early_init = init_task_gen_early,
 #ifdef SOFT_CRC
 	// For SOFT_CRC, no offload is needed. If both NOOFFLOADS and NOMULTSEGS flags are set the
 	// vector mode is used by DPDK, resulting (theoretically) in higher performance.
-	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
 #else
 	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
 #endif
@@ -1277,8 +1367,9 @@ static struct task_init task_init_gen_pcap = {
 	.init = init_task_gen_pcap,
 	.handle = handle_gen_pcap_bulk,
 	.start = start_pcap,
+	.early_init = init_task_gen_early,
 #ifdef SOFT_CRC
-	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS | TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS,
+	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX | TASK_FEATURE_TXQ_FLAGS_NOOFFLOADS,
 #else
 	.flag_features = TASK_FEATURE_NEVER_DISCARDS | TASK_FEATURE_NO_RX,
 #endif
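
Dropping TASK_FEATURE_TXQ_FLAGS_NOMULTSEGS from all three modes follows from the jumbo-frame support introduced above: that feature let PROX configure TX queues with a no-multi-segment promise to the PMD, which no longer holds once a frame may span more than one default-sized mbuf. A hedged sketch of what such a queue setup looks like on pre-18.05 DPDK, where rte_eth_txconf still carries txq_flags; the function and parameter names are illustrative:

```c
#include <string.h>
#include <rte_ethdev.h>

/* On DPDK releases that still use txq_flags (< 18.05), a TX queue could
 * promise "no multi-segment packets" to enable the simpler vector path.
 * Jumbo frames break that promise, so the flag must be left out. */
static int setup_txq(uint8_t port_id, uint16_t queue_id, uint16_t nb_desc,
		     unsigned int socket_id, int allow_multiseg)
{
	struct rte_eth_txconf txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS; /* still no offloads for SOFT_CRC */
	if (!allow_multiseg)
		txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS; /* only safe without jumbo frames */

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, socket_id, &txconf);
}
```
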