X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=nfvbench%2Fnfvbench.py;h=891b2bb776b5eb007cc95dcd584dec39faa471f6;hb=87d88eab6fe50101abc607857f13af9273aef80a;hp=b36d328ca0798b689c34139b04dd8d64d0854757;hpb=fd72e102b0d6b6b62f72a06ad950e7959c54d7ef;p=nfvbench.git diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py index b36d328..891b2bb 100644 --- a/nfvbench/nfvbench.py +++ b/nfvbench/nfvbench.py @@ -14,68 +14,70 @@ # under the License. # -from __init__ import __version__ import argparse -from attrdict import AttrDict -from chain_runner import ChainRunner -from collections import defaultdict -from config import config_load -from config import config_loads import copy -import credentials import datetime -from factory import BasicFactory -from fluentd import FluentLogHandler import importlib import json -import log -from log import LOG -from nfvbenchd import WebSocketIoServer import os -import pbr.version -from pkg_resources import resource_string -from specs import ChainType -from specs import Specs -from summarizer import NFVBenchSummarizer import sys import traceback -from traffic_client import TrafficGeneratorFactory -import utils + +from attrdict import AttrDict +from logging import FileHandler +import pbr.version +from pkg_resources import resource_string + +from .__init__ import __version__ +from .chain_runner import ChainRunner +from .cleanup import Cleaner +from .config import config_load +from .config import config_loads +from . import credentials +from .fluentd import FluentLogHandler +from . import log +from .log import LOG +from .nfvbenchd import WebServer +from .specs import ChainType +from .specs import Specs +from .summarizer import NFVBenchSummarizer +from . import utils fluent_logger = None + class NFVBench(object): """Main class of NFV benchmarking tool.""" + STATUS_OK = 'OK' STATUS_ERROR = 'ERROR' def __init__(self, config, openstack_spec, config_plugin, factory, notifier=None): + # the base config never changes for a given NFVbench instance self.base_config = config + # this is the running config, updated at every run() self.config = None self.config_plugin = config_plugin self.factory = factory self.notifier = notifier - self.cred = credentials.Credentials(config.openrc_file, None, False) + self.cred = credentials.Credentials(config.openrc_file, config.clouds_detail, None, False) \ + if config.openrc_file or config.clouds_detail else None self.chain_runner = None self.specs = Specs() self.specs.set_openstack_spec(openstack_spec) - self.clients = defaultdict(lambda: None) self.vni_ports = [] sys.stdout.flush() - def setup(self): - self.specs.set_run_spec(self.config_plugin.get_run_spec(self.specs.openstack)) - self.chain_runner = ChainRunner(self.config, - self.clients, - self.cred, - self.specs, - self.factory, - self.notifier) - def set_notifier(self, notifier): self.notifier = notifier - def run(self, opts): + def run(self, opts, args, dry_run=False): + """This run() method is called for every NFVbench benchmark request. + + In CLI mode, this method is called only once per invocation. 
+ In REST server mode, this is called once per REST POST request + On dry_run, show the running config in json format then exit + """ status = NFVBench.STATUS_OK result = None message = '' @@ -83,17 +85,46 @@ class NFVBench(object): # take a snapshot of the current time for this new run # so that all subsequent logs can relate to this run fluent_logger.start_new_run() + LOG.info(args) try: - self.update_config(opts) - self.setup() - + # recalc the running config based on the base config and options for this run + self._update_config(opts) + + if dry_run: + print((json.dumps(self.config, sort_keys=True, indent=4))) + sys.exit(0) + + # check that an empty openrc file (no OpenStack) is only allowed + # with EXT chain + if (not self.config.openrc_file and not self.config.clouds_detail) and \ + self.config.service_chain != ChainType.EXT: + raise Exception("openrc_file or clouds_detail in the configuration is required" + " for PVP/PVVP chains") + + self.specs.set_run_spec(self.config_plugin.get_run_spec(self.config, + self.specs.openstack)) + self.chain_runner = ChainRunner(self.config, + self.cred, + self.specs, + self.factory, + self.notifier) + new_frame_sizes = [] + # make sure that the min frame size is 64 + min_packet_size = 64 + for frame_size in self.config.frame_sizes: + try: + if int(frame_size) < min_packet_size: + frame_size = str(min_packet_size) + LOG.info("Adjusting frame size %s bytes to minimum size %s bytes", + frame_size, min_packet_size) + if frame_size not in new_frame_sizes: + new_frame_sizes.append(frame_size) + except ValueError: + new_frame_sizes.append(frame_size.upper()) + self.config.frame_sizes = new_frame_sizes result = { "date": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "nfvbench_version": __version__, - "openstack_spec": { - "vswitch": self.specs.openstack.vswitch, - "encaps": self.specs.openstack.encaps - }, "config": self.config_plugin.prepare_results_config(copy.deepcopy(self.config)), "benchmarks": { "network": { @@ -102,6 +133,9 @@ class NFVBench(object): } } } + if self.specs.openstack: + result['openstack_spec'] = {"vswitch": self.specs.openstack.vswitch, + "encaps": self.specs.openstack.encaps} result['benchmarks']['network']['versions'].update(self.config_plugin.get_version()) except Exception: status = NFVBench.STATUS_ERROR @@ -113,21 +147,21 @@ class NFVBench(object): if self.chain_runner: self.chain_runner.close() - if status == NFVBench.STATUS_OK: - result = utils.dict_to_json_dict(result) - return { - 'status': status, - 'result': result - } - else: - return { - 'status': status, - 'error_message': message - } - - def print_summary(self, result): - """Print summary of the result""" - summary = NFVBenchSummarizer(result) + if status == NFVBench.STATUS_OK: + # result2 = utils.dict_to_json_dict(result) + return { + 'status': status, + 'result': result + } + return { + 'status': status, + 'error_message': message + } + + def prepare_summary(self, result): + """Prepare summary of the result to print and send it to logger (eg: fluentd).""" + global fluent_logger + summary = NFVBenchSummarizer(result, fluent_logger) LOG.info(str(summary)) def save(self, result): @@ -138,85 +172,159 @@ class NFVBench(object): self.config.service_chain, self.config.service_chain_count, self.config.flow_count, - self.config.frame_sizes) + self.config.frame_sizes, + self.config.user_id, + self.config.group_id) + + def _update_config(self, opts): + """Recalculate the running config based on the base config and opts. 
- def update_config(self, opts): + Sanity check on the config is done here as well. + """ self.config = AttrDict(dict(self.base_config)) + # Update log file handler if needed after a config update (REST mode) + if 'log_file' in opts: + if opts['log_file']: + (path, _filename) = os.path.split(opts['log_file']) + if not os.path.exists(path): + LOG.warning( + 'Path %s does not exist. Please verify root path is shared with host. Path ' + 'will be created.', path) + os.makedirs(path) + LOG.info('%s is created.', path) + if not any(isinstance(h, FileHandler) for h in log.getLogger().handlers): + log.add_file_logger(opts['log_file']) + else: + for h in log.getLogger().handlers: + if isinstance(h, FileHandler) and h.baseFilename != opts['log_file']: + # clean log file handler + log.getLogger().removeHandler(h) + log.add_file_logger(opts['log_file']) + self.config.update(opts) + config = self.config + + config.service_chain = config.service_chain.upper() + config.service_chain_count = int(config.service_chain_count) + if config.l2_loopback: + # force the number of chains to be 1 in case of untagged l2 loopback + # (on the other hand, multiple L2 vlan tagged service chains are allowed) + if not config.vlan_tagging: + config.service_chain_count = 1 + config.service_chain = ChainType.EXT + config.no_arp = True + LOG.info('Running L2 loopback: using EXT chain/no ARP') + + # allow oversized vlan lists, just clip them + try: + vlans = [list(v) for v in config.vlans] + for v in vlans: + del v[config.service_chain_count:] + config.vlans = vlans + except Exception: + pass - self.config.service_chain = self.config.service_chain.upper() - self.config.service_chain_count = int(self.config.service_chain_count) - self.config.flow_count = utils.parse_flow_count(self.config.flow_count) - required_flow_count = self.config.service_chain_count * 2 - if self.config.flow_count < required_flow_count: - LOG.info("Flow count '{}' has been set to minimum value of '{}' " - "for current configuration".format(self.config.flow_count, - required_flow_count)) - self.config.flow_count = required_flow_count - - if self.config.flow_count % 2 != 0: - self.config.flow_count += 1 - - self.config.duration_sec = float(self.config.duration_sec) - self.config.interval_sec = float(self.config.interval_sec) - - # Get traffic generator profile config - if not self.config.generator_profile: - self.config.generator_profile = self.config.traffic_generator.default_profile - - generator_factory = TrafficGeneratorFactory(self.config) - self.config.generator_config = \ - generator_factory.get_generator_config(self.config.generator_profile) - - if not any(self.config.generator_config.pcis): - raise Exception("PCI addresses configuration for selected traffic generator profile " - "({tg_profile}) are missing. Please specify them in configuration file." 
- .format(tg_profile=self.config.generator_profile)) - - if self.config.traffic is None or len(self.config.traffic) == 0: - raise Exception("No traffic profile found in traffic configuration, " - "please fill 'traffic' section in configuration file.") - - if isinstance(self.config.traffic, tuple): - self.config.traffic = self.config.traffic[0] - - self.config.frame_sizes = generator_factory.get_frame_sizes(self.config.traffic.profile) - - self.config.ipv6_mode = False - self.config.no_dhcp = True - self.config.same_network_only = True - if self.config.openrc_file: - self.config.openrc_file = os.path.expanduser(self.config.openrc_file) - - self.config.ndr_run = (not self.config.no_traffic - and 'ndr' in self.config.rate.strip().lower().split('_')) - self.config.pdr_run = (not self.config.no_traffic - and 'pdr' in self.config.rate.strip().lower().split('_')) - self.config.single_run = (not self.config.no_traffic - and not (self.config.ndr_run or self.config.pdr_run)) - - if self.config.vlans and len(self.config.vlans) != 2: - raise Exception('Number of configured VLAN IDs for VLAN tagging must be exactly 2.') - - self.config.json_file = self.config.json if self.config.json else None - if self.config.json_file: - (path, filename) = os.path.split(self.config.json) + # traffic profile override options + if 'frame_sizes' in opts: + unidir = False + if 'unidir' in opts: + unidir = opts['unidir'] + override_custom_traffic(config, opts['frame_sizes'], unidir) + LOG.info("Frame size has been set to %s for current configuration", opts['frame_sizes']) + + config.flow_count = utils.parse_flow_count(config.flow_count) + required_flow_count = config.service_chain_count * 2 + if config.flow_count < required_flow_count: + LOG.info("Flow count %d has been set to minimum value of '%d' " + "for current configuration", config.flow_count, + required_flow_count) + config.flow_count = required_flow_count + + if config.flow_count % 2: + config.flow_count += 1 + + # Possibly adjust the cache size + if config.cache_size < 0: + config.cache_size = config.flow_count + + # The size must be capped to 10000 (where does this limit come from?) + config.cache_size = min(config.cache_size, 10000) + + config.duration_sec = float(config.duration_sec) + config.interval_sec = float(config.interval_sec) + config.pause_sec = float(config.pause_sec) + + if config.traffic is None or not config.traffic: + raise Exception("Missing traffic property in configuration") + + if config.openrc_file: + config.openrc_file = os.path.expanduser(config.openrc_file) + if config.flavor.vcpus < 2: + raise Exception("Flavor vcpus must be >= 2") + + config.ndr_run = (not config.no_traffic and + 'ndr' in config.rate.strip().lower().split('_')) + config.pdr_run = (not config.no_traffic and + 'pdr' in config.rate.strip().lower().split('_')) + config.single_run = (not config.no_traffic and + not (config.ndr_run or config.pdr_run)) + + config.json_file = config.json if config.json else None + if config.json_file: + (path, _filename) = os.path.split(config.json) if not os.path.exists(path): raise Exception('Please provide existing path for storing results in JSON file. 
' 'Path used: {path}'.format(path=path)) - self.config.std_json_path = self.config.std_json if self.config.std_json else None - if self.config.std_json_path: - if not os.path.exists(self.config.std_json): + config.std_json_path = config.std_json if config.std_json else None + if config.std_json_path: + if not os.path.exists(config.std_json): raise Exception('Please provide existing path for storing results in JSON file. ' - 'Path used: {path}'.format(path=self.config.std_json_path)) + 'Path used: {path}'.format(path=config.std_json_path)) + + # Check that multiqueue is between 1 and 8 (8 is the max allowed by libvirt/qemu) + if config.vif_multiqueue_size < 1 or config.vif_multiqueue_size > 8: + raise Exception('vif_multiqueue_size (%d) must be in [1..8]' % + config.vif_multiqueue_size) - self.config_plugin.validate_config(self.config, self.specs.openstack) + # VxLAN and MPLS sanity checks + if config.vxlan or config.mpls: + if config.vlan_tagging: + config.vlan_tagging = False + config.no_latency_streams = True + config.no_latency_stats = True + config.no_flow_stats = True + LOG.info('VxLAN or MPLS: vlan_tagging forced to False ' + '(inner VLAN tagging must be disabled)') + self.config_plugin.validate_config(config, self.specs.openstack) -def parse_opts_from_cli(): + +def bool_arg(x): + """Argument type to be used in parser.add_argument() + When a boolean like value is expected to be given + """ + return (str(x).lower() != 'false') \ + and (str(x).lower() != 'no') \ + and (str(x).lower() != '0') + + +def int_arg(x): + """Argument type to be used in parser.add_argument() + When an integer type value is expected to be given + (returns 0 if argument is invalid, hexa accepted) + """ + return int(x, 0) + + +def _parse_opts_from_cli(): parser = argparse.ArgumentParser() + parser.add_argument('--status', dest='status', + action='store_true', + default=None, + help='Provide NFVbench status') + parser.add_argument('-c', '--config', dest='config', action='store', help='Override default values with a config file or ' @@ -225,10 +333,8 @@ def parse_opts_from_cli(): parser.add_argument('--server', dest='server', default=None, - action='store', - metavar='', - help='Run nfvbench in server mode and pass' - ' the HTTP root folder full pathname') + action='store_true', + help='Run nfvbench in server mode') parser.add_argument('--host', dest='host', action='store', @@ -241,7 +347,7 @@ def parse_opts_from_cli(): help='Port on which server will be listening (default 7555)') parser.add_argument('-sc', '--service-chain', dest='service_chain', - choices=BasicFactory.chain_classes, + choices=ChainType.names, action='store', help='Service chain to run') @@ -273,13 +379,19 @@ def parse_opts_from_cli(): parser.add_argument('--inter-node', dest='inter_node', default=None, action='store_true', - help='run VMs in different compute nodes (PVVP only)') + help='(deprecated)') parser.add_argument('--sriov', dest='sriov', default=None, action='store_true', help='Use SRIOV (no vswitch - requires SRIOV support in compute nodes)') + parser.add_argument('--use-sriov-middle-net', dest='use_sriov_middle_net', + default=None, + action='store_true', + help='Use SRIOV to handle the middle network traffic ' + '(PVVP with SRIOV only)') + parser.add_argument('-d', '--debug', dest='debug', action='store_true', default=None, @@ -289,9 +401,16 @@ def parse_opts_from_cli(): action='store', help='Traffic generator profile to use') - parser.add_argument('-i', '--image', dest='image_name', - action='store', - help='VM image name to use') + 
parser.add_argument('-l3', '--l3-router', dest='l3_router', + default=None, + action='store_true', + help='Use L3 neutron routers to handle traffic') + + parser.add_argument('-garp', '--gratuitous-arp', dest='periodic_gratuitous_arp', + default=None, + action='store_true', + help='Use gratuitous ARP to maintain session between TG ' + 'and L3 routers to handle traffic') parser.add_argument('-0', '--no-traffic', dest='no_traffic', default=None, @@ -304,31 +423,47 @@ def parse_opts_from_cli(): help='Do not use ARP to find MAC addresses, ' 'instead use values in config file') - parser.add_argument('--no-reset', dest='no_reset', + parser.add_argument('--loop-vm-arp', dest='loop_vm_arp', default=None, action='store_true', - help='Do not reset counters prior to running') + help='Use ARP to find MAC addresses ' + 'instead of using values from TRex ports (VPP forwarder only)') - parser.add_argument('--no-int-config', dest='no_int_config', + parser.add_argument('--no-vswitch-access', dest='no_vswitch_access', default=None, action='store_true', - help='Skip interfaces config on EXT service chain') + help='Skip vswitch configuration and retrieving of stats') - parser.add_argument('--no-tor-access', dest='no_tor_access', + parser.add_argument('--vxlan', dest='vxlan', default=None, action='store_true', - help='Skip TOR switch configuration and retrieving of stats') + help='Enable VxLan encapsulation') - parser.add_argument('--no-vswitch-access', dest='no_vswitch_access', + parser.add_argument('--mpls', dest='mpls', default=None, action='store_true', - help='Skip vswitch configuration and retrieving of stats') + help='Enable MPLS encapsulation') parser.add_argument('--no-cleanup', dest='no_cleanup', default=None, action='store_true', help='no cleanup after run') + parser.add_argument('--cleanup', dest='cleanup', + default=None, + action='store_true', + help='Cleanup NFVbench resources (prompt to confirm)') + + parser.add_argument('--force-cleanup', dest='force_cleanup', + default=None, + action='store_true', + help='Cleanup NFVbench resources (do not prompt)') + + parser.add_argument('--restart', dest='restart', + default=None, + action='store_true', + help='Restart TRex server') + parser.add_argument('--json', dest='json', action='store', help='store results in json format file', @@ -346,10 +481,15 @@ def parse_opts_from_cli(): action='store_true', help='print the default config in yaml format (unedited)') + parser.add_argument('--show-pre-config', dest='show_pre_config', + default=None, + action='store_true', + help='print the config in json format (cfg file applied)') + parser.add_argument('--show-config', dest='show_config', default=None, action='store_true', - help='print the running config in json format') + help='print the running config in json format (final)') parser.add_argument('-ss', '--show-summary', dest='summary', action='store', @@ -376,6 +516,121 @@ def parse_opts_from_cli(): help='Filename for saving logs', metavar='') + parser.add_argument('--user-label', '--userlabel', dest='user_label', + action='store', + help='Custom label for performance records') + + parser.add_argument('--hypervisor', dest='hypervisor', + action='store', + metavar='', + help='Where chains must run ("compute", "az:", "az:compute")') + + parser.add_argument('--l2-loopback', '--l2loopback', dest='l2_loopback', + action='store', + metavar='', + help='Port to port or port to switch to port L2 loopback ' + 'tagged with given VLAN id(s) or not (given \'no-tag\') ' + '\'true\': use current vlans; \'false\': disable this 
mode.') + + parser.add_argument('--i40e-mixed', dest='i40e_mixed', + action='store', + default=None, + metavar='', + help='TRex behavior when dealing with a i40e network card driver' + ' [ https://trex-tgn.cisco.com/youtrack/issue/trex-528 ]') + + parser.add_argument('--user-info', dest='user_info', + action='append', + metavar='', + help='Custom data to be included as is ' + 'in the json report config branch - ' + ' example, pay attention! no space: ' + '--user-info=\'{"status":"explore","description":' + '{"target":"lab","ok":true,"version":2020}}\' - ' + 'this option may be repeated; given data will be merged.') + + parser.add_argument('--vlan-tagging', dest='vlan_tagging', + type=bool_arg, + metavar='', + action='store', + default=None, + help='Override the NFVbench \'vlan_tagging\' parameter') + + parser.add_argument('--intf-speed', dest='intf_speed', + metavar='', + action='store', + default=None, + help='Override the NFVbench \'intf_speed\' ' + 'parameter (e.g. 10Gbps, auto, 16.72Gbps)') + + parser.add_argument('--cores', dest='cores', + type=int_arg, + metavar='', + action='store', + default=None, + help='Override the T-Rex \'cores\' parameter') + + parser.add_argument('--cache-size', dest='cache_size', + type=int_arg, + metavar='', + action='store', + default=None, + help='Specify the FE cache size (default: 0, flow-count if < 0)') + + parser.add_argument('--service-mode', dest='service_mode', + action='store_true', + default=None, + help='Enable T-Rex service mode (for debugging purpose)') + + parser.add_argument('--no-e2e-check', dest='no_e2e_check', + action='store_true', + default=None, + help='Skip "end to end" connectivity check (on test purpose)') + + parser.add_argument('--no-flow-stats', dest='no_flow_stats', + action='store_true', + default=None, + help='Disable additional flow stats (on high load traffic)') + + parser.add_argument('--no-latency-stats', dest='no_latency_stats', + action='store_true', + default=None, + help='Disable flow stats for latency traffic') + + parser.add_argument('--no-latency-streams', dest='no_latency_streams', + action='store_true', + default=None, + help='Disable latency measurements (no streams)') + + parser.add_argument('--user-id', dest='user_id', + type=int_arg, + metavar='', + action='store', + default=None, + help='Change json/log files ownership with this user (int)') + + parser.add_argument('--group-id', dest='group_id', + type=int_arg, + metavar='', + action='store', + default=None, + help='Change json/log files ownership with this group (int)') + + parser.add_argument('--show-trex-log', dest='show_trex_log', + default=None, + action='store_true', + help='Show the current TRex local server log file contents' + ' => diagnostic/help in case of configuration problems') + + parser.add_argument('--debug-mask', dest='debug_mask', + type=int_arg, + metavar='', + action='store', + default=None, + help='General purpose register (debugging flags), ' + 'the hexadecimal notation (0x...) is accepted.' 
+ 'Designed for development needs (default: 0).') + opts, unknown_opts = parser.parse_known_args() return opts, unknown_opts @@ -388,8 +643,7 @@ def load_default_config(): def override_custom_traffic(config, frame_sizes, unidir): - """Override the traffic profiles with a custom one - """ + """Override the traffic profiles with a custom one.""" if frame_sizes is not None: traffic_profile_name = "custom_traffic_profile" config.traffic_profile = [ @@ -407,6 +661,7 @@ def override_custom_traffic(config, frame_sizes, unidir): "profile": traffic_profile_name } + def check_physnet(name, netattrs): if not netattrs.physical_network: raise Exception("SRIOV requires physical_network to be specified for the {n} network" @@ -415,133 +670,305 @@ def check_physnet(name, netattrs): raise Exception("SRIOV requires segmentation_id to be specified for the {n} network" .format(n=name)) +def status_cleanup(config, cleanup, force_cleanup): + LOG.info('Version: %s', pbr.version.VersionInfo('nfvbench').version_string_with_vcs()) + # check if another run is pending + ret_code = 0 + try: + with utils.RunLock(): + LOG.info('Status: idle') + except Exception: + LOG.info('Status: busy (run pending)') + ret_code = 1 + # check nfvbench resources + if config.openrc_file and config.service_chain != ChainType.EXT: + cleaner = Cleaner(config) + count = cleaner.show_resources() + if count and (cleanup or force_cleanup): + cleaner.clean(not force_cleanup) + sys.exit(ret_code) + def main(): global fluent_logger + run_summary_required = False try: log.setup() # load default config file config, default_cfg = load_default_config() + # possibly override the default user_id & group_id values + if 'USER_ID' in os.environ: + config.user_id = int(os.environ['USER_ID']) + if 'GROUP_ID' in os.environ: + config.group_id = int(os.environ['GROUP_ID']) + # create factory for platform specific classes try: factory_module = importlib.import_module(config['factory_module']) factory = getattr(factory_module, config['factory_class'])() except AttributeError: raise Exception("Requested factory module '{m}' or class '{c}' was not found." 
- .format(m=config['factory_module'], c=config['factory_class'])) + .format(m=config['factory_module'], + c=config['factory_class'])) from AttributeError # create config plugin for this platform config_plugin = factory.get_config_plugin_class()(config) config = config_plugin.get_config() - openstack_spec = config_plugin.get_openstack_spec() - - # setup the fluent logger as soon as possible right after the config plugin is called - if config.fluentd.logging_tag: - fluent_logger = FluentLogHandler(config.fluentd.logging_tag, - fluentd_ip=config.fluentd.ip, - fluentd_port=config.fluentd.port) - LOG.addHandler(fluent_logger) - else: - fluent_logger = None - opts, unknown_opts = parse_opts_from_cli() + opts, unknown_opts = _parse_opts_from_cli() log.set_level(debug=opts.debug) if opts.version: - print pbr.version.VersionInfo('nfvbench').version_string_with_vcs() + print((pbr.version.VersionInfo('nfvbench').version_string_with_vcs())) sys.exit(0) if opts.summary: - with open(opts.summary) as json_data: - print NFVBenchSummarizer(json.load(json_data)) + with open(opts.summary, encoding="utf-8") as json_data: + result = json.load(json_data) + if opts.user_label: + result['config']['user_label'] = opts.user_label + print((NFVBenchSummarizer(result, fluent_logger))) sys.exit(0) # show default config in text/yaml format if opts.show_default_config: - print default_cfg + print((default_cfg.decode("utf-8"))) + sys.exit(0) + + # dump the contents of the trex log file + if opts.show_trex_log: + try: + with open('/tmp/trex.log', encoding="utf-8") as trex_log_file: + print(trex_log_file.read(), end="") + except FileNotFoundError: + print("No TRex log file found!") sys.exit(0) + # mask info logging in case of further config dump + if opts.show_config or opts.show_pre_config: + LOG.setLevel(log.logging.WARNING) + config.name = '' if opts.config: # do not check extra_specs in flavor as it can contain any key/value pairs - whitelist_keys = ['extra_specs'] + # the same principle applies also to the optional user_info open property + whitelist_keys = ['extra_specs', 'user_info'] # override default config options with start config at path parsed from CLI # check if it is an inline yaml/json config or a file name if os.path.isfile(opts.config): - LOG.info('Loading configuration file: ' + opts.config) + LOG.info('Loading configuration file: %s', opts.config) config = config_load(opts.config, config, whitelist_keys) config.name = os.path.basename(opts.config) else: - LOG.info('Loading configuration string: ' + opts.config) + LOG.info('Loading configuration string: %s', opts.config) config = config_loads(opts.config, config, whitelist_keys) + # show current config in json format (before CLI overriding) + if opts.show_pre_config: + print((json.dumps(config, sort_keys=True, indent=4))) + sys.exit(0) + + # setup the fluent logger as soon as possible right after the config plugin is called, + # if there is any logging or result tag is set then initialize the fluent logger + for fluentd in config.fluentd: + if fluentd.logging_tag or fluentd.result_tag: + fluent_logger = FluentLogHandler(config.fluentd) + LOG.addHandler(fluent_logger) + break + # traffic profile override options override_custom_traffic(config, opts.frame_sizes, opts.unidir) - # copy over cli options that are used in config + # Copy over some of the cli options that are used in config. + # This explicit copy is sometimes necessary + # because some early evaluation depends on them + # and cannot wait for _update_config() coming further. 
+ # It is good practice then to set them to None (<=> done) + # and even required if a specific conversion is performed here + # that would be corrupted by a default update (simple copy). + # On the other hand, some excessive assignments have been removed + # from here, since the _update_config() procedure does them well. + config.generator_profile = opts.generator_profile - if opts.sriov: + if opts.sriov is not None: config.sriov = True - if opts.log_file: + opts.sriov = None + if opts.log_file is not None: config.log_file = opts.log_file - - # show running config in json format - if opts.show_config: - print json.dumps(config, sort_keys=True, indent=4) - sys.exit(0) + opts.log_file = None + if opts.user_id is not None: + config.user_id = opts.user_id + opts.user_id = None + if opts.group_id is not None: + config.group_id = opts.group_id + opts.group_id = None + if opts.service_chain is not None: + config.service_chain = opts.service_chain + opts.service_chain = None + if opts.hypervisor is not None: + # can be any of 'comp1', 'nova:', 'nova:comp1' + config.compute_nodes = opts.hypervisor + opts.hypervisor = None + if opts.debug_mask is not None: + config.debug_mask = opts.debug_mask + opts.debug_mask = None + + # convert 'user_info' opt from json string to dictionnary + # and merge the result with the current config dictionnary + if opts.user_info is not None: + for user_info_json in opts.user_info: + user_info_dict = json.loads(user_info_json) + if config.user_info: + config.user_info = config.user_info + user_info_dict + else: + config.user_info = user_info_dict + opts.user_info = None + + # port to port loopback (direct or through switch) + # we accept the following syntaxes for the CLI argument + # 'false' : mode not enabled + # 'true' : mode enabled with currently defined vlan IDs + # 'no-tag' : mode enabled with no vlan tagging + # : mode enabled using the given (pair of) vlan ID lists + # - If present, a '_' char will separate left an right ports lists + # e.g. 'a_x' => vlans: [[a],[x]] + # 'a,b,c_x,y,z' => [[a,b,c],[x,y,z]] + # - Otherwise the given vlan ID list applies to both sides + # e.g. 'a' => vlans: [[a],[a]] + # 'a,b' => [[a,b],[a,b]] + # - Vlan lists size needs to be at least the actual SCC value + # - Unless overriden in CLI opts, config.service_chain_count + # is adjusted to the size of the VLAN ID lists given here. 
+ + if opts.l2_loopback is not None: + arg_pair = opts.l2_loopback.lower().split('_') + if arg_pair[0] == 'false': + config.l2_loopback = False + else: + config.l2_loopback = True + if config.service_chain != ChainType.EXT: + LOG.info('Changing service chain type to EXT') + config.service_chain = ChainType.EXT + if not config.no_arp: + LOG.info('Disabling ARP') + config.no_arp = True + if arg_pair[0] == 'true': + pass + else: + # here explicit (not)tagging is not CLI overridable + opts.vlan_tagging = None + if arg_pair[0] == 'no-tag': + config.vlan_tagging = False + else: + config.vlan_tagging = True + if len(arg_pair) == 1 or not arg_pair[1]: + arg_pair = [arg_pair[0], arg_pair[0]] + vlans = [[], []] + + def append_vlan(port, vlan_id): + # a vlan tag value must be in [0..4095] + if vlan_id not in range(0, 4096): + raise ValueError + vlans[port].append(vlan_id) + try: + for port in [0, 1]: + vlan_ids = arg_pair[port].split(',') + for vlan_id in vlan_ids: + append_vlan(port, int(vlan_id)) + if len(vlans[0]) != len(vlans[1]): + raise ValueError + except ValueError: + # at least one invalid tag => no tagging + config.vlan_tagging = False + if config.vlan_tagging: + config.vlans = vlans + # force service chain count if not CLI overriden + if opts.service_chain_count is None: + config.service_chain_count = len(vlans[0]) + opts.l2_loopback = None + + if config.i40e_mixed is None: + config.i40e_mixed = 'ignore' + if config.use_sriov_middle_net is None: + config.use_sriov_middle_net = False + if opts.use_sriov_middle_net is not None: + config.use_sriov_middle_net = opts.use_sriov_middle_net + opts.use_sriov_middle_net = None + if (config.use_sriov_middle_net and ( + (not config.sriov) or (config.service_chain != ChainType.PVVP))): + raise Exception("--use-sriov-middle-net is only valid for PVVP with SRIOV") if config.sriov and config.service_chain != ChainType.EXT: # if sriov is requested (does not apply to ext chains) # make sure the physnet names are specified check_physnet("left", config.internal_networks.left) check_physnet("right", config.internal_networks.right) - if config.service_chain == ChainType.PVVP: + if config.service_chain == ChainType.PVVP and config.use_sriov_middle_net: check_physnet("middle", config.internal_networks.middle) # update the config in the config plugin as it might have changed # in a copy of the dict (config plugin still holds the original dict) config_plugin.set_config(config) + if opts.status or opts.cleanup or opts.force_cleanup: + status_cleanup(config, opts.cleanup, opts.force_cleanup) + # add file log if requested if config.log_file: log.add_file_logger(config.log_file) + # possibly change file ownership + uid = config.user_id + gid = config.group_id + if gid is None: + gid = uid + if uid is not None: + os.chown(config.log_file, uid, gid) - nfvbench = NFVBench(config, openstack_spec, config_plugin, factory) + openstack_spec = config_plugin.get_openstack_spec() if config.openrc_file \ + else None + + nfvbench_instance = NFVBench(config, openstack_spec, config_plugin, factory) if opts.server: - if os.path.isdir(opts.server): - server = WebSocketIoServer(opts.server, nfvbench) - nfvbench.set_notifier(server) - try: - port = int(opts.port) - except ValueError: - server.run(host=opts.host) - else: - server.run(host=opts.host, port=port) + server = WebServer(nfvbench_instance, fluent_logger) + try: + port = int(opts.port) + except ValueError: + server.run(host=opts.host) else: - print 'Invalid HTTP root directory: ' + opts.server - sys.exit(1) + 
server.run(host=opts.host, port=port) + # server.run() should never return else: + dry_run = opts.show_config with utils.RunLock(): + run_summary_required = True if unknown_opts: err_msg = 'Unknown options: ' + ' '.join(unknown_opts) LOG.error(err_msg) raise Exception(err_msg) # remove unfilled values - opts = {k: v for k, v in vars(opts).iteritems() if v is not None} - result = nfvbench.run(opts) + opts = {k: v for k, v in list(vars(opts).items()) if v is not None} + # get CLI args + params = ' '.join(str(e) for e in sys.argv[1:]) + result = nfvbench_instance.run(opts, params, dry_run=dry_run) if 'error_message' in result: raise Exception(result['error_message']) if 'result' in result and result['status']: - nfvbench.save(result['result']) - nfvbench.print_summary(result['result']) + nfvbench_instance.save(result['result']) + nfvbench_instance.prepare_summary(result['result']) except Exception as exc: + run_summary_required = True LOG.error({ 'status': NFVBench.STATUS_ERROR, 'error_message': traceback.format_exc() }) - print str(exc) - sys.exit(1) + print((str(exc))) + finally: + if fluent_logger: + # only send a summary record if there was an actual nfvbench run or + # if an error/exception was logged. + fluent_logger.send_run_summary(run_summary_required) + if __name__ == '__main__': main()
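
Illustrative sketch (not part of the patch): the new CLI handling above introduces the bool_arg and int_arg argparse type converters and documents the --l2-loopback syntax ('true', 'false', 'no-tag', or explicit VLAN ID lists such as 'a,b,c_x,y,z' meaning vlans [[a,b,c],[x,y,z]]). The standalone snippet below mirrors that logic so the option parsing can be tried in isolation; the parse_l2_loopback() helper and the demo parser are illustrative additions, not functions defined in nfvbench.py.

import argparse

def bool_arg(x):
    """Argparse type converter accepting boolean-like values (as in the patch)."""
    # condensed but equivalent form of the patch's chained comparisons
    return str(x).lower() not in ('false', 'no', '0')

def int_arg(x):
    """Argparse type converter accepting decimal or hexadecimal (0x...) integers."""
    return int(x, 0)

def parse_l2_loopback(value):
    """Return (enabled, vlan_tagging, vlans) for an --l2-loopback argument.

    Follows the syntax described in the inline comments of main():
    'false', 'true', 'no-tag', or explicit VLAN ID list(s) with an optional
    '_' separating the left and right port lists.
    """
    arg_pair = value.lower().split('_')
    if arg_pair[0] == 'false':
        return False, None, None
    if arg_pair[0] == 'true':
        return True, None, None      # keep currently configured vlans/tagging
    if arg_pair[0] == 'no-tag':
        return True, False, None     # loopback without vlan tagging
    if len(arg_pair) == 1 or not arg_pair[1]:
        arg_pair = [arg_pair[0], arg_pair[0]]   # same list applies to both ports
    vlans = [[], []]
    try:
        for port in (0, 1):
            for vlan_id in arg_pair[port].split(','):
                vlan_id = int(vlan_id)
                if vlan_id not in range(4096):   # valid 802.1Q tag range
                    raise ValueError
                vlans[port].append(vlan_id)
        if len(vlans[0]) != len(vlans[1]):
            raise ValueError
    except ValueError:
        return True, False, None     # any invalid tag => no tagging
    return True, True, vlans

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--vlan-tagging', type=bool_arg, default=None)
    parser.add_argument('--debug-mask', type=int_arg, default=None)
    parser.add_argument('--l2-loopback', default=None)
    opts = parser.parse_args(['--vlan-tagging', 'no',
                              '--debug-mask', '0x10',
                              '--l2-loopback', '91,92,93_171,172,173'])
    print(opts.vlan_tagging)                    # False
    print(opts.debug_mask)                      # 16
    print(parse_l2_loopback(opts.l2_loopback))  # (True, True, [[91, 92, 93], [171, 172, 173]])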