X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=yardstick%2Fnetwork_services%2Fvnf_generic%2Fvnf%2Fvpe_vnf.py;h=bfff45c67b958576adc0580d393f6b1703cf0784;hb=7c8b47ee0c6fee700d46adfca118f02507981198;hp=cd4a008cefa3ae25e34b0107c8e4e74b03bb6c21;hpb=d374dc8ca09e7d6e08a531e68a21a8b107af21d0;p=yardstick.git

diff --git a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
index cd4a008ce..bfff45c67 100644
--- a/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
+++ b/yardstick/network_services/vnf_generic/vnf/vpe_vnf.py
@@ -24,18 +24,20 @@ import posixpath
 
 from six.moves import configparser, zip
 
+from yardstick.common.process import check_if_process_failed
 from yardstick.network_services.helpers.samplevnf_helper import PortPairs
 from yardstick.network_services.pipeline import PipelineRules
 from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF, DpdkVnfSetupEnvHelper
+from yardstick.benchmark.contexts import base as ctx_base
 
 LOG = logging.getLogger(__name__)
 
-VPE_PIPELINE_COMMAND = """sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script}"""
+VPE_PIPELINE_COMMAND = "sudo {tool_path} -p {port_mask_hex} -f {cfg_file} -s {script} {hwlb}"
 
 VPE_COLLECT_KPI = """\
-Pkts in:\s(\d+)\r\n\
-\tPkts dropped by AH:\s(\d+)\r\n\
-\tPkts dropped by other:\s(\d+)\
+Pkts in:\\s(\\d+)\r\n\
+\tPkts dropped by AH:\\s(\\d+)\r\n\
+\tPkts dropped by other:\\s(\\d+)\
 """
 
 
@@ -50,15 +52,35 @@ class ConfigCreate(object):
         config.set(tm_q, 'cfg', '/tmp/full_tm_profile_10G.cfg')
         return config
 
-    def __init__(self, uplink_ports, downlink_ports, socket):
+    def __init__(self, vnfd_helper, socket):
         super(ConfigCreate, self).__init__()
         self.sw_q = -1
         self.sink_q = -1
         self.n_pipeline = 1
-        self.uplink_ports = uplink_ports
-        self.downlink_ports = downlink_ports
+        self.vnfd_helper = vnfd_helper
+        self.uplink_ports = self.vnfd_helper.port_pairs.uplink_ports
+        self.downlink_ports = self.vnfd_helper.port_pairs.downlink_ports
         self.pipeline_per_port = 9
         self.socket = socket
+        self._dpdk_port_to_link_id_map = None
+
+    @property
+    def dpdk_port_to_link_id_map(self):
+        # we need interface name -> DPDK port num (PMD ID) -> LINK ID
+        # LINK ID -> PMD ID is governed by the port mask
+        # LINK instances are created implicitly based on the PORT_MASK application startup
+        # argument. LINK0 is the first port enabled in the PORT_MASK, LINK1 is the next one,
+        # etc. The LINK ID is different from the DPDK PMD-level NIC port ID, which is the actual
+        # position in the bitmask mentioned above. For example, if bit 5 is the first bit set
+        # in the bitmask, then LINK0 has PMD ID 5. This mechanism creates a
+        # contiguous LINK ID space and isolates the configuration file against changes in the
+        # board PCIe slots where NICs are plugged in.
+        if self._dpdk_port_to_link_id_map is None:
+            self._dpdk_port_to_link_id_map = {}
+            for link_id, port_name in enumerate(sorted(self.vnfd_helper.port_pairs.all_ports,
+                                                       key=self.vnfd_helper.port_num)):
+                self._dpdk_port_to_link_id_map[port_name] = link_id
+        return self._dpdk_port_to_link_id_map
 
     def vpe_initialize(self, config):
         config.add_section('EAL')
@@ -77,7 +99,7 @@ class ConfigCreate(object):
 
     def vpe_rxq(self, config):
         for port in self.downlink_ports:
-            new_section = 'RXQ{0}.0'.format(port)
+            new_section = 'RXQ{0}.0'.format(self.dpdk_port_to_link_id_map[port])
             config.add_section(new_section)
             config.set(new_section, 'mempool', 'MEMPOOL1')
 
@@ -94,7 +116,8 @@ class ConfigCreate(object):
             pktq = "SWQ{0}{1}".format(self.sw_q, sink)
         return pktq
 
-    def vpe_upstream(self, vnf_cfg, index=0):
+    def vpe_upstream(self, vnf_cfg, index=0):  # pragma: no cover
+        # NOTE(ralonsoh): this function must be covered in UTs.
         parser = configparser.ConfigParser()
         parser.read(os.path.join(vnf_cfg, 'vpe_upstream'))
 
@@ -102,7 +125,8 @@ class ConfigCreate(object):
             for k, v in parser.items(pipeline):
                 if k == "pktq_in":
                     if "RXQ" in v:
-                        value = "RXQ{0}.0".format(self.uplink_ports[index])
+                        port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]]
+                        value = "RXQ{0}.0".format(port)
                     else:
                         value = self.get_sink_swq(parser, pipeline, k, index)
 
@@ -110,7 +134,8 @@ class ConfigCreate(object):
 
                 elif k == "pktq_out":
                     if "TXQ" in v:
-                        value = "TXQ{0}.0".format(self.downlink_ports[index])
+                        port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]]
+                        value = "TXQ{0}.0".format(port)
                     else:
                         self.sw_q += 1
                         value = self.get_sink_swq(parser, pipeline, k, index)
@@ -124,30 +149,33 @@ class ConfigCreate(object):
         self.n_pipeline += 1
         return parser
 
-    def vpe_downstream(self, vnf_cfg, index):
+    def vpe_downstream(self, vnf_cfg, index):  # pragma: no cover
+        # NOTE(ralonsoh): this function must be covered in UTs.
parser = configparser.ConfigParser() parser.read(os.path.join(vnf_cfg, 'vpe_downstream')) for pipeline in parser.sections(): for k, v in parser.items(pipeline): if k == "pktq_in": + port = self.dpdk_port_to_link_id_map[self.downlink_ports[index]] if "RXQ" not in v: value = self.get_sink_swq(parser, pipeline, k, index) elif "TM" in v: - value = "RXQ{0}.0 TM{1}".format(self.downlink_ports[index], index) + value = "RXQ{0}.0 TM{1}".format(port, index) else: - value = "RXQ{0}.0".format(self.downlink_ports[index]) + value = "RXQ{0}.0".format(port) parser.set(pipeline, k, value) if k == "pktq_out": + port = self.dpdk_port_to_link_id_map[self.uplink_ports[index]] if "TXQ" not in v: self.sw_q += 1 value = self.get_sink_swq(parser, pipeline, k, index) elif "TM" in v: - value = "TXQ{0}.0 TM{1}".format(self.uplink_ports[index], index) + value = "TXQ{0}.0 TM{1}".format(port, index) else: - value = "TXQ{0}.0".format(self.uplink_ports[index]) + value = "TXQ{0}.0".format(port) parser.set(pipeline, k, value) @@ -165,7 +193,7 @@ class ConfigCreate(object): config = self.vpe_initialize(config) config = self.vpe_rxq(config) config.write(cfg_file) - for index in range(0, len(self.uplink_ports)): + for index, _ in enumerate(self.uplink_ports): config = self.vpe_upstream(vnf_cfg, index) config.write(cfg_file) config = self.vpe_downstream(vnf_cfg, index) @@ -174,14 +202,19 @@ class ConfigCreate(object): def generate_vpe_script(self, interfaces): rules = PipelineRules(pipeline_id=1) - for priv_port, pub_port in zip(self.uplink_ports, self.downlink_ports): - priv_intf = interfaces[priv_port]["virtual-interface"] - pub_intf = interfaces[pub_port]["virtual-interface"] + for uplink_port, downlink_port in zip(self.uplink_ports, self.downlink_ports): - dst_port0_ip = priv_intf["dst_ip"] - dst_port1_ip = pub_intf["dst_ip"] - dst_port0_mac = priv_intf["dst_mac"] - dst_port1_mac = pub_intf["dst_mac"] + uplink_intf = \ + next(intf["virtual-interface"] for intf in interfaces + if intf["name"] == uplink_port) + downlink_intf = \ + next(intf["virtual-interface"] for intf in interfaces + if intf["name"] == downlink_port) + + dst_port0_ip = uplink_intf["dst_ip"] + dst_port1_ip = downlink_intf["dst_ip"] + dst_port0_mac = uplink_intf["dst_mac"] + dst_port1_mac = downlink_intf["dst_mac"] rules.add_firewall_script(dst_port0_ip) rules.next_pipeline() @@ -198,7 +231,7 @@ class ConfigCreate(object): return rules.get_string() - def generate_tm_cfg(self, vnf_cfg, index=0): + def generate_tm_cfg(self, vnf_cfg): vnf_cfg = os.path.join(vnf_cfg, "full_tm_profile_10G.cfg") if os.path.exists(vnf_cfg): return open(vnf_cfg).read() @@ -226,8 +259,7 @@ class VpeApproxSetupEnvHelper(DpdkVnfSetupEnvHelper): } self._build_vnf_ports() - vpe_conf = ConfigCreate(self.vnfd_helper.port_pairs.uplink_ports, - self.vnfd_helper.port_pairs.downlink_ports, self.socket) + vpe_conf = ConfigCreate(self.vnfd_helper, self.socket) vpe_conf.create_vpe_config(self.scenario_helper.vnf_cfg) config_basename = posixpath.basename(self.CFG_CONFIG) @@ -269,7 +301,13 @@ class VpeApproxVnf(SampleVNF): raise NotImplementedError def collect_kpi(self): + # we can't get KPIs if the VNF is down + check_if_process_failed(self._vnf_process) + physical_node = ctx_base.Context.get_physical_node_from_server( + self.scenario_helper.nodes[self.name]) + result = { + "physical_node": physical_node, 'pkt_in_up_stream': 0, 'pkt_drop_up_stream': 0, 'pkt_in_down_stream': 0,
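
Illustrative note, not part of the patch above: the new dpdk_port_to_link_id_map property sorts the VNF's ports by their DPDK PMD port number and numbers them contiguously from zero, so the LINK/RXQ/TXQ IDs written into the generated config do not depend on which bits of the PORT_MASK happen to be set. A minimal sketch of that mapping, using hypothetical interface names and PMD IDs:

    # Hypothetical example; 'xe0'/'xe1' and their PMD IDs are made up for illustration.
    pmd_ids = {'xe0': 5, 'xe1': 7}  # e.g. only bits 5 and 7 of the PORT_MASK are set

    def link_id_map(port_names, port_num):
        # Interface name -> LINK ID, assigned contiguously in ascending PMD-ID order.
        return {name: link_id
                for link_id, name in enumerate(sorted(port_names, key=port_num))}

    print(link_id_map(['xe1', 'xe0'], pmd_ids.get))
    # {'xe0': 0, 'xe1': 1} -> LINK0/LINK1 in the config, even though the PMD IDs are 5 and 7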