-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
:param tunnel_operation: encapsulation, decapsulation or None
:return: IVSwitchController for the deployment_scenario
"""
+ # pylint: disable=too-many-return-statements
deployment_scenario = deployment_scenario.lower()
if deployment_scenario.startswith("p2p"):
return VswitchControllerP2P(vswitch_class, traffic)
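For reference, a self-contained sketch of the prefix dispatch used above (the scenario strings and the toy controller classes here are illustrative, not the factory's real signature):
    class ControllerP2P(object):
        """Toy stand-in for VswitchControllerP2P."""

    class ControllerPVP(object):
        """Toy stand-in for a PVP controller."""

    def pick_controller(deployment_scenario):
        # lower-case the scenario and dispatch on its prefix, as in the hunk above
        deployment_scenario = deployment_scenario.lower()
        if deployment_scenario.startswith("p2p"):
            return ControllerP2P()
        return ControllerPVP()

    print(type(pick_controller('P2P_tput')).__name__)   # ControllerP2P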
:param loadgen_class: Reference to load generator class to be used.
:return: A new ILoadGenerator instance
"""
+ # pylint: disable=too-many-function-args
loadgen_type = loadgen_type.lower()
if loadgen_type.find("dummy") >= 0:
return DummyLoadGen(loadgen_cfg)
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from conf import settings
from core.loader.loader_servant import LoaderServant
-from tools.pkt_gen.trafficgen import ITrafficGenerator
from tools.collectors.collector import ICollector
+from tools.pkt_fwd.pkt_fwd import IPktFwd
+from tools.pkt_gen.trafficgen import ITrafficGenerator
from vswitches.vswitch import IVSwitch
from vnfs.vnf.vnf import IVnf
-from tools.pkt_fwd.pkt_fwd import IPktFwd
class Loader(object):
"""Loader class - main object context holder.
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# find all classes derived from given interface, but suppress
# interface itself and any abstract class starting with iface name
gens = dict((k, v) for (k, v) in list(mod.__dict__.items())
- if type(v) == type and
+ if isinstance(v, type) and
issubclass(v, interface) and
not k.startswith(interface.__name__))
if gens:
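For reference, a self-contained sketch of the class-discovery filter above, using made-up names (IFoo, FooImpl) rather than anything from the loader:
    class IFoo(object):
        """Toy interface."""

    class IFooAbstract(IFoo):
        """Excluded: its name starts with the interface name."""

    class FooImpl(IFoo):
        """Included: concrete implementation."""

    namespace = {'IFoo': IFoo, 'IFooAbstract': IFooAbstract,
                 'FooImpl': FooImpl, 'not_a_class': 42}
    gens = dict((k, v) for (k, v) in namespace.items()
                if isinstance(v, type) and
                issubclass(v, IFoo) and
                not k.startswith(IFoo.__name__))
    print(list(gens))   # ['FooImpl'] - only the concrete implementation survives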
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# enforce that GUEST_NICS_NR is 1 or an even number of NICs
updated = False
nics_nr = settings.getValue('GUEST_NICS_NR')
- for index in range(len(nics_nr)):
- if nics_nr[index] > 1 and nics_nr[index] % 2:
+ for index, value in enumerate(nics_nr):
+ if value > 1 and value % 2:
updated = True
- nics_nr[index] = int(nics_nr[index] / 2) * 2
+ nics_nr[index] = int(value / 2) * 2
if updated:
settings.setValue('GUEST_NICS_NR', nics_nr)
self._logger.warning('Odd number of NICs was detected. Configuration '
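A quick, standalone illustration of the rounding applied above (the NIC counts are made up):
    nics_nr = [1, 3, 4, 7]
    for index, value in enumerate(nics_nr):
        if value > 1 and value % 2:              # odd and greater than 1
            nics_nr[index] = int(value / 2) * 2  # round down to the nearest even number
    print(nics_nr)   # [1, 2, 4, 6]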
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct method names
-method-rgx=[a-z_][a-z0-9_]{2,30}$
+method-rgx=[a-z_][a-z0-9_]{2,35}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
ignored-argument-names=_.*
# Maximum number of locals for function / method body
-max-locals=20
+max-locals=25
# Maximum number of return / yield for function / method body
-max-returns=6
+max-returns=10
# Maximum number of branch for function / method body
-max-branches=15
+max-branches=25
# Maximum number of statements in function / method body
max-statements=70
xmlrunner==1.7.7
requests==2.8.1
netaddr==0.7.18
-scapy-python3==0.18
\ No newline at end of file
+scapy-python3==0.18
+distro
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from src.dpdk.dpdk import *
from src.dpdk.testpmd_proc import TestPMDProcess
-
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
def init():
"""Setup system for DPDK.
"""
+ # pylint: disable=global-statement
global _NICS
global _NICS_PCI
_NICS = S.getValue('NICS')
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
"""Wrapper for an OVS dpctl (``ovs-dpctl``) for managing datapaths.
"""
-
-import os
import logging
-import string
from tools import tasks
from conf import settings
"""
self.logger.debug('delete datapath ' + dp_name)
self.run_dpctl(['del-dp', dp_name])
-
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
https://github.com/openstack/neutron/blob/6eac1dc99124ca024d6a69b3abfa3bc69c735667/neutron/agent/linux/ovs_lib.py
"""
-
-import os
import logging
import string
import re
:return: None
"""
- tmp_timeout = self.timeout if timeout == None else timeout
+ tmp_timeout = self.timeout if timeout is None else timeout
cmd = ['sudo', settings.getValue('TOOLS')['ovs-ofctl'], '-O',
'OpenFlow13', '--timeout', str(tmp_timeout)] + args
return tasks.run_task(
"""
# insert flows from cache into OVS if needed
if cache == 'flush':
- if self._cache_file == None:
+ if self._cache_file is None:
self.logger.error('flow cache flush called, but nothing is cached')
return
self.logger.debug('flows cached in %s will be added to the bridge', _CACHE_FILE_NAME)
# insert flow to the cache or OVS
if cache == 'on':
# create and open cache file if needed
- if self._cache_file == None:
+ if self._cache_file is None:
self._cache_file = open(_CACHE_FILE_NAME, 'w')
self._cache_file.write(_flow_key + '\n')
else:
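A toy, self-contained model of the cache behaviour above, assuming the same 'on'/'flush' semantics (the flow keys and bridge name are illustrative; the real code writes keys to _CACHE_FILE_NAME and inserts them via ovs-ofctl):
    cached = []

    def add_flow_sketch(flow_key, cache='off'):
        if cache == 'on':
            cached.append(flow_key)        # only remember the flow
        elif cache == 'flush':
            for key in cached:             # replay every cached flow in one go
                print('ovs-ofctl add-flow br0 ' + key)
            del cached[:]
        else:
            print('ovs-ofctl add-flow br0 ' + flow_key)   # immediate insertion

    add_flow_sketch('in_port=1,actions=output:2', cache='on')
    add_flow_sketch('in_port=2,actions=output:1', cache='on')
    add_flow_sketch('', cache='flush')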
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from testcases import TestCase
from tools.report import report
-from conf import settings as S
class PerformanceTestCase(TestCase):
"""PerformanceTestCase class
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
CHECK_PREFIX = 'validate_'
+# pylint: disable=too-many-instance-attributes
class TestCase(object):
"""TestCase base class
In this basic form runs RFC2544 throughput test
"""
+ # pylint: disable=too-many-statements
def __init__(self, cfg):
"""Pull out fields from test config
'tunnel_type': self._tunnel_type,})
# Packet Forwarding mode
- self._vswitch_none = 'none' == S.getValue('VSWITCH').strip().lower()
+ self._vswitch_none = S.getValue('VSWITCH').strip().lower() == 'none'
# trafficgen configuration required for tests of tunneling protocols
if self.deployment == "op2p":
def _mount_hugepages(self):
"""Mount hugepages if usage of DPDK or Qemu is detected
"""
+ # pylint: disable=too-many-boolean-expressions
# hugepages are needed by DPDK and Qemu
if not self._hugepages_mounted and \
(self.deployment.count('v') or \
else:
result3 = True
- logging.info('Need a total of {} total hugepages'.format(
- hugepages_needed + sock1_mem + sock0_mem))
+ logging.info('Need a total of %s hugepages',
+ hugepages_needed + sock1_mem + sock0_mem)
# The only drawback here is sometimes dpdk doesn't release
# its hugepages on a test failure. This could cause a test
"""Add flows to the vswitch
"""
vswitch = self._vswitch_ctl.get_vswitch()
- # TODO BOM 15-08-07 the frame mod code assumes that the
+ # NOTE BOM 15-08-07 the frame mod code assumes that the
# physical ports are ports 1 & 2. The actual numbers
# need to be retrieved from the vSwitch and the metadata value
# updated accordingly.
'goto_table:3']}
vswitch.add_flow(bridge, flow)
elif self._frame_mod == "ip_port":
- # TODO BOM 15-08-27 The traffic generated is assumed
+ # NOTE BOM 15-08-27 The traffic generated is assumed
# to be UDP (nw_proto 17d) which is the default case but
# we will need to pick up the actual traffic params in use.
flow = {'table':'2', 'priority':'1000', 'metadata':'2',
# initialize list with results
self._step_result = [None] * len(self.test)
+ # We have to suppress the pylint report, because test_object has to be set
+ # according to the test step definition
+ # pylint: disable=redefined-variable-type
# run test step by step...
for i, step in enumerate(self.test):
step_ok = not self._step_check
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from collections import OrderedDict
from tools import tasks
from tools import systeminfo
-from conf import settings
from tools.collectors.collector import collector
+from conf import settings
_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# Support functions
#
-
+# pylint: disable=too-many-branches
def settings_update_paths():
""" Configure paths to OVS, DPDK and QEMU sources and binaries based on
selected vswitch type and src/binary switch. Data are taken from
paths['paths']['ovs_etc_tmp'] = S.getValue('PATHS')['vswitch']['ovs_etc_tmp']
tools = {}
+ # pylint: disable=too-many-nested-blocks
for path_class in paths:
for tool in paths[path_class]:
tmp_tool = paths[path_class][tool]
tmp_tool = tmp_glob[0]
elif not os.path.exists(tmp_tool):
if tool.endswith('_tmp'):
- logging.getLogger().debug('Temporary path to the {} does not '
- 'exist: {}.'.format(tool, tmp_tool))
+ logging.getLogger().debug('Temporary path to the %s does not '
+ 'exist: %s', tool, tmp_tool)
else:
raise RuntimeError('Path to the {} is not valid: {}'.format(tool, tmp_tool))
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from conf import settings
_LOGGER = logging.getLogger(__name__)
-_allocated_hugepages = False
+_ALLOCATED_HUGEPAGES = False
#
# hugepage management
#
"""
hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
re.IGNORECASE)
- with open('/proc/meminfo', 'r') as fh:
- data = fh.readlines()
+ with open('/proc/meminfo', 'r') as result_file:
+ data = result_file.readlines()
for line in data:
match = hugepage_size_re.search(line)
if match:
_LOGGER.info('Hugepages size: %s kb', match.group('size_hp'))
return int(match.group('size_hp'))
- else:
- _LOGGER.error('Could not parse for hugepage size')
- return 0
+ _LOGGER.error('Could not parse for hugepage size')
+ return 0
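For clarity, the regular expression above only matches the Hugepagesize line of /proc/meminfo; a standalone check with a typical x86_64 value:
    import re

    hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
                                  re.IGNORECASE)
    match = hugepage_size_re.search('Hugepagesize:       2048 kB')
    if match:
        print(int(match.group('size_hp')))   # 2048 (kB)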
def allocate_hugepages():
"""
hp_size = get_hugepage_size()
if hp_size > 0:
- nr_hp = int(math.ceil(settings.getValue('HUGEPAGE_RAM_ALLOCATION')/hp_size))
- _LOGGER.info('Will allocate %s hugepages.', nr_hp)
-
- nr_hugepages = 'vm.nr_hugepages=' + str(nr_hp)
- try:
- tasks.run_task(['sudo', 'sysctl', nr_hugepages],
- _LOGGER, 'Trying to allocate hugepages..', True)
- except subprocess.CalledProcessError:
- _LOGGER.error('Unable to allocate hugepages.')
- return False
- global _allocated_hugepages
- _allocated_hugepages = True
- return True
+ nr_hp = int(math.ceil(settings.getValue('HUGEPAGE_RAM_ALLOCATION')/hp_size))
+ _LOGGER.info('Will allocate %s hugepages.', nr_hp)
+
+ nr_hugepages = 'vm.nr_hugepages=' + str(nr_hp)
+ try:
+ tasks.run_task(['sudo', 'sysctl', nr_hugepages],
+ _LOGGER, 'Trying to allocate hugepages..', True)
+ except subprocess.CalledProcessError:
+ _LOGGER.error('Unable to allocate hugepages.')
+ return False
+ # pylint: disable=global-statement
+ global _ALLOCATED_HUGEPAGES
+ _ALLOCATED_HUGEPAGES = True
+ return True
else:
_LOGGER.error('Hugepage size is 0; unable to allocate hugepages.')
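The requested page count above is simply the configured amount of hugepage RAM divided by the page size, rounded up; a worked example with illustrative numbers (both values in kB, matching the division in the code):
    import math

    hugepage_ram_allocation = 8388608      # kB requested (illustrative value)
    hp_size = 2048                         # kB per page, as parsed from /proc/meminfo
    nr_hp = int(math.ceil(hugepage_ram_allocation / hp_size))
    print(nr_hp)                           # 4096
    print('vm.nr_hugepages=' + str(nr_hp)) # argument handed to 'sudo sysctl'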
def deallocate_hugepages():
"""De-allocate hugepages that were allocated on the fly
"""
- global _allocated_hugepages
- if _allocated_hugepages:
+ # pylint: disable=global-statement
+ global _ALLOCATED_HUGEPAGES
+ if _ALLOCATED_HUGEPAGES:
nr_hugepages = 'vm.nr_hugepages= 0'
try:
tasks.run_task(['sudo', 'sysctl', nr_hugepages],
except subprocess.CalledProcessError:
_LOGGER.error('Unable to de-allocate hugepages.')
return False
- _allocated_hugepages = False
+ _ALLOCATED_HUGEPAGES = False
return True
meminfo_path = '/sys/devices/system/node/node{}/meminfo'.format(
socket)
else:
- _LOGGER.info('No hugepage info found for socket {}'.format(socket))
+ _LOGGER.info('No hugepage info found for socket %s', socket)
return 0
else:
meminfo_path = '/proc/meminfo'
- with open(meminfo_path, 'r') as fh:
- data = fh.readlines()
+ with open(meminfo_path, 'r') as result_file:
+ data = result_file.readlines()
for line in data:
match = hugepage_free_re.search(line)
if match:
_LOGGER.info('Hugepages free: %s %s', match.group('free_hp'),
'on socket {}'.format(socket) if socket else '')
return int(match.group('free_hp'))
- else:
- _LOGGER.info('Could not parse for hugepage size')
- return 0
+ _LOGGER.info('Could not parse for hugepage size')
+ return 0
def is_hugepage_available():
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from tools.load_gen.load_gen import ILoadGenerator
+# pylint: disable=super-init-not-called
class DummyLoadGen(ILoadGenerator):
"""Dummy load generator, which doesn't generate any load"""
def __init__(self, stress_config):
"""Start stress load if it was requested"""
pass
- def kill(self, signal='-15', sleep=2):
+ def kill(self, dummy_signal='-15', dummy_sleep=2):
"""Kill stress load if it is active"""
pass
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
return None
- @staticmethod
- def get_module_dependecies(module):
+ def get_module_dependecies(self, module):
"""Return list of modules, which must be loaded before module itself
:param module: a name of kernel module
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
_LOGGER.warning('Error during reinitialization of VF %s', vf_nic)
else:
_LOGGER.warning("Can't detect driver for VF %s", vf_nic)
-
"""
vsperf2dashboard
"""
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# See the License for the specific language governing permissions and
# limitations under the License.
-
import os
import csv
-import requests
-import json
import logging
+import requests
def results2opnfv_dashboard(results_path, int_data):
"""
version_ovs = line.replace(' ', '')
version_ovs = version_ovs.replace('OVS_TAG?=', '')
if "DPDK_TAG" in line:
- if int_data['vanilla'] == False:
+ if int_data['vanilla'] is False:
version_dpdk = line.replace(' ', '')
version_dpdk = version_dpdk.replace('DPDK_TAG?=', '')
else:
"version": version,
"details": details}
- myData = requests.post(url, json=body)
- logging.info("Results for %s sent to opnfv, http response: %s", casename, myData)
+ my_data = requests.post(url, json=body)
+ logging.info("Results for %s sent to opnfv, http response: %s", casename, my_data)
logging.debug("opnfv url: %s", db_url)
logging.debug("the body sent to opnfv")
logging.debug(body)
for name, name_list in names.items():
if name != testcase:
continue
- if vanilla == True:
+ if vanilla is True:
res_name = name_list[1]
else:
res_name = name_list[0]
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
:returns: None
"""
raise NotImplementedError('Please call an implementation.')
-
-# Copyright 2016 Intel Corporation.
+# Copyright 2016-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
except pexpect.EOF:
pass
dpdk.cleanup()
-
-
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
else:
print('Please respond with \'yes\' or \'no\' ', end='')
-
def get_user_traffic(traffic_type, traffic_conf, flow_conf, traffic_stats):
"""
Request user input for traffic.
traffic_,
('b2b frames', 'b2b frame loss %'))
- framesize = traffic_['l2']['framesize']
-
# builds results by using user-supplied values
# and guessing remainder using available info
result[ResultsConstants.B2B_FRAMES] = float(results[0])
result[ResultsConstants.B2B_FRAME_LOSS_PERCENT] = float(results[1])
return result
+ def start_cont_traffic(self, traffic=None, duration=20):
+ raise NotImplementedError('Dummy does not implement start_cont_traffic')
+
+ def stop_cont_traffic(self):
+ raise NotImplementedError(
+ 'Dummy does not implement stop_cont_traffic')
+
+ def start_rfc2544_back2back(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Dummy does not implement start_rfc2544_back2back')
+
+ def wait_rfc2544_back2back(self):
+ raise NotImplementedError(
+ 'Dummy does not implement wait_rfc2544_back2back')
+
+ def start_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Dummy does not implement start_rfc2544_throughput')
+ def wait_rfc2544_throughput(self):
+ raise NotImplementedError(
+ 'Dummy does not implement wait_rfc2544_throughput')
if __name__ == '__main__':
TRAFFIC = {
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
If any of these don't exist, the application will raise an exception
(EAFP).
"""
-
import tkinter
import logging
import os
for key in values:
value = values[key]
- # Not allowing derived dictionary types for now
- # pylint: disable=unidiomatic-typecheck
- if type(value) == dict:
+ if isinstance(value, dict):
_prefix = ' '.join([prefix, key]).strip()
for subkey in _build_set_cmds(value, _prefix):
yield subkey
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
- if type(value) == bool:
+ if isinstance(value, bool):
value = str(int(value))
else:
value = str(value)
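A one-line illustration of the bool conversion above:
    value = True
    print(str(int(value)))   # '1'    - the form Tcl accepts
    print(str(value))        # 'True' - the form Tcl would reject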
_tclsh = tkinter.Tcl()
_logger = logging.getLogger(__name__)
+ def start_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Ixia start throughput traffic not implemented')
+
+ def wait_rfc2544_throughput(self):
+ raise NotImplementedError(
+ 'Ixia wait throughput traffic not implemented')
+
+ def start_rfc2544_back2back(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError(
+ 'Ixia start back2back traffic not implemented')
+
+ def send_rfc2544_back2back(self, traffic=None, duration=60,
+ lossrate=0.0, tests=1):
+ raise NotImplementedError(
+ 'Ixia send back2back traffic not implemented')
+
+ def wait_rfc2544_back2back(self):
+ raise NotImplementedError(
+ 'Ixia wait back2back traffic not implemented')
+
def run_tcl(self, cmd):
"""Run a TCL script using the TCL interpreter found in ``tkinter``.
assert len(result) == 6 # fail-fast if underlying Tcl code changes
- #TODO - implement Burst results setting via TrafficgenResults.
+ # NOTE - implement Burst results setting via TrafficgenResults.
def send_cont_traffic(self, traffic=None, duration=30):
"""See ITrafficGenerator for description
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
be expected that the user has access to the IxNetwork machine should
this trafficgen need to be debugged.
"""
-
import tkinter
import logging
import os
for key in values:
value = values[key]
- # Not allowing derived dictionary types for now
- # pylint: disable=unidiomatic-typecheck
- if type(value) == dict:
+ if isinstance(value, dict):
_prefix = ' '.join([prefix, key]).strip()
for subkey in _build_set_cmds(value, _prefix):
yield subkey
continue
- # pylint: disable=unidiomatic-typecheck
# tcl doesn't recognise the strings "True" or "False", only "1"
# or "0". Special case to convert them
- if type(value) == bool:
+ if isinstance(value, bool):
value = str(int(value))
else:
value = str(value)
return parse_ixnet_rfc_results(parse_result_string(output[0]))
+ def send_burst_traffic(self, traffic=None, numpkts=100, duration=20):
+ raise NotImplementedError('IxNet does not implement send_burst_traffic')
if __name__ == '__main__':
TRAFFIC = {
-# Copyright 2016 Spirent Communications.
+# Copyright 2016-2017 Spirent Communications.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+#
+# Invalid file name; '_' must be used instead of '-'
+# pylint: disable=invalid-name
'''
@author Spirent Communications
from conf import settings
-logger = logging.getLogger(__name__)
+_LOGGER = logging.getLogger(__name__)
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
- except OSError as e:
- logger.error("Failed to create directory %s: %s", path, str(e))
+ except OSError as ex:
+ _LOGGER.error("Failed to create directory %s: %s", path, str(ex))
raise
""" Write the results of the query to the CSV """
create_dir(results_path)
filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
- with open(filec, "wb") as f:
- f.write(query_results["Columns"].replace(" ", ",") + "\n")
+ with open(filec, "wb") as result_file:
+ result_file.write(query_results["Columns"].replace(" ", ",") + "\n")
for row in (query_results["Output"].replace("} {", ",").
replace("{", "").replace("}", "").split(",")):
- f.write(row.replace(" ", ",") + "\n")
+ result_file.write(row.replace(" ", ",") + "\n")
def positive_int(value):
"%s not in range [0.0, 100.0]" % pvalue)
return pvalue
-
+# pylint: disable=too-many-branches, too-many-statements
def main():
""" Read the arguments, Invoke Test and Return the results"""
parser = argparse.ArgumentParser()
args = parser.parse_args()
if args.verbose:
- logger.debug("Creating results directory")
+ _LOGGER.debug("Creating results directory")
create_dir(args.results_dir)
session_name = args.test_session_name
user_name = args.test_user_name
-
+ # pylint: disable=import-error
try:
# Load Spirent REST Library
from stcrestclient import stchttp
stc = stchttp.StcHttp(args.lab_server_addr)
session_id = stc.new_session(user_name, session_name)
stc.join_session(session_id)
- except RuntimeError as e:
- logger.error(e)
+ except RuntimeError as err:
+ _LOGGER.error(err)
raise
# Get STC system info.
# Retrieve and display the server information
if args.verbose:
- logger.debug("SpirentTestCenter system version: %s",
- stc.get("system1", "version"))
+ _LOGGER.debug("SpirentTestCenter system version: %s",
+ stc.get("system1", "version"))
try:
device_list = []
port_list = []
if args.verbose:
- logger.debug("Bring up license server")
+ _LOGGER.debug("Bring up license server")
license_mgr = stc.get("system1", "children-licenseservermanager")
if args.verbose:
- logger.debug("license_mgr = %s", license_mgr)
+ _LOGGER.debug("license_mgr = %s", license_mgr)
stc.create("LicenseServer", under=license_mgr, attributes={
- "server": args.license_server_addr})
+ "server": args.license_server_addr})
# Create the root project object
if args.verbose:
- logger.debug("Creating project ...")
+ _LOGGER.debug("Creating project ...")
project = stc.get("System1", "children-Project")
# Configure any custom traffic parameters
if args.traffic_custom == "cont":
if args.verbose:
- logger.debug("Configure Continuous Traffic")
+ _LOGGER.debug("Configure Continuous Traffic")
stc.create("ContinuousTestConfig", under=project)
# Create ports
if args.verbose:
- logger.debug("Creating ports ...")
+ _LOGGER.debug("Creating ports ...")
east_chassis_port = stc.create('port', project)
if args.verbose:
- logger.debug("Configuring TX port ...")
+ _LOGGER.debug("Configuring TX port ...")
stc.config(east_chassis_port, {'location': tx_port_loc})
port_list.append(east_chassis_port)
west_chassis_port = stc.create('port', project)
if args.verbose:
- logger.debug("Configuring RX port ...")
+ _LOGGER.debug("Configuring RX port ...")
stc.config(west_chassis_port, {'location': rx_port_loc})
port_list.append(west_chassis_port)
# Append to the device list
device_list.append(device_gen_config['ReturnList'])
if args.verbose:
- logger.debug(device_list)
+ _LOGGER.debug(device_list)
# Create the RFC 2544 test for the selected metric
if args.metric == "throughput":
if args.verbose:
- logger.debug("Set up the RFC2544 throughput test...")
+ _LOGGER.debug("Set up the RFC2544 throughput test...")
stc.perform("Rfc2544SetupThroughputTestCommand",
params={"AcceptableFrameLoss":
args.acceptable_frame_loss_pct,
"system1.project", "children-port"), "autoConnect": "TRUE"})
# Apply configuration.
if args.verbose:
- logger.debug("Apply configuration...")
+ _LOGGER.debug("Apply configuration...")
stc.apply()
if args.verbose:
- logger.debug("Starting the sequencer...")
+ _LOGGER.debug("Starting the sequencer...")
stc.perform("SequencerStart")
# Wait for sequencer to finish
- logger.info(
+ _LOGGER.info(
"Starting test... Please wait for the test to complete...")
stc.wait_until_complete()
- logger.info("The test has completed... Saving results...")
+ _LOGGER.info("The test has completed... Saving results...")
# Determine what the results database filename is...
lab_server_resultsdb = stc.get(
"system1.project.TestResultSetting", "CurrentResultFileName")
if args.verbose:
- logger.debug("The lab server results database is %s",
- lab_server_resultsdb)
+ _LOGGER.debug("The lab server results database is %s",
+ lab_server_resultsdb)
stc.perform("CSSynchronizeFiles",
params={"DefaultDownloadDir": args.results_dir})
if not os.path.exists(resultsdb):
resultsdb = lab_server_resultsdb
- logger.info("Failed to create the local summary DB File, using"
- " the remote DB file instead.")
+ _LOGGER.info("Failed to create the local summary DB File, using"
+ " the remote DB file instead.")
else:
- logger.info(
+ _LOGGER.info(
"The local summary DB file has been saved to %s", resultsdb)
# This returns the "RFC2544ThroughputTestResultDetailedSummaryView"
("RFC2544FrameLossTestResultDetailed"
"SummaryView")}))
if args.verbose:
- logger.debug("resultsdict[\"Columns\"]: %s",
- resultsdict["Columns"])
- logger.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"])
- logger.debug("Result paths: %s",
- stc.perform("GetTestResultSettingPaths"))
+ _LOGGER.debug("resultsdict[\"Columns\"]: %s",
+ resultsdict["Columns"])
+ _LOGGER.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"])
+ _LOGGER.debug("Result paths: %s",
+ stc.perform("GetTestResultSettingPaths"))
# Write results to csv
- logger.debug("Writing CSV file to results directory %s",
- args.results_dir)
+ _LOGGER.debug("Writing CSV file to results directory %s",
+ args.results_dir)
write_query_results_to_csv(
args.results_dir, args.csv_results_file_prefix, resultsdict)
except RuntimeError as e:
- logger.error(e)
+ _LOGGER.error(e)
if args.verbose:
- logger.debug("Destroy session on lab server")
+ _LOGGER.debug("Destroy session on lab server")
stc.end_session()
- logger.info("Test complete!")
+ _LOGGER.info("Test complete!")
if __name__ == "__main__":
main()
-# Copyright 2016 Spirent Communications.
+# Copyright 2016-2017 Spirent Communications.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+# pylint: disable=invalid-name
'''
@author Spirent Communications
"%s not in range [0.0, 100.0]" % pvalue)
return pvalue
-
+# pylint: disable=too-many-statements
def main():
""" Read the arguments, Invoke Test and Return the results"""
parser = argparse.ArgumentParser()
session_name = args.test_session_name
user_name = args.test_user_name
+ # pylint: disable=import-error
try:
# Load Spirent REST Library
from stcrestclient import stchttp
row["ForwardingRate(fps)"])
return result
+ # pylint: disable=unused-argument
def send_rfc2889_forwarding(self, traffic=None, tests=1, duration=20):
"""
Send traffic per RFC2889 Forwarding test specifications.
return self.get_rfc2544_results(filecs)
+ def start_cont_traffic(self, traffic=None, duration=30):
+ raise NotImplementedError('TestCenter start_cont_traffic not implemented.')
+
+ def stop_cont_traffic(self):
+ raise NotImplementedError('TestCenter stop_cont_traffic not implemented.')
+
+ def start_rfc2544_back2back(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError('TestCenter start_rfc2544_back2back not implemented.')
+
+ def wait_rfc2544_back2back(self):
+ raise NotImplementedError('TestCenter wait_rfc2544_back2back not implemented.')
+
+ def start_rfc2544_throughput(self, traffic=None, tests=1, duration=20,
+ lossrate=0.0):
+ raise NotImplementedError('TestCenter start_rfc2544_throughput not implemented.')
+
+ def wait_rfc2544_throughput(self):
+ raise NotImplementedError('TestCenter wait_rfc2544_throughput not implemented.')
+
if __name__ == '__main__':
TRAFFIC = {
'l3': {
-# Copyright 2016 Red Hat Inc & Xena Networks.
+# Copyright 2016-2017 Red Hat Inc & Xena Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import sys
import threading
import time
-
+# pylint: disable=too-many-lines
# Xena Socket Commands
CMD_CLEAR_RX_STATS = 'pr_clear'
CMD_CLEAR_TX_STATS = 'pt_clear'
"""
return self.driver.ask_verify(make_manager_command(CMD_OWNER, username))
-
+# pylint: disable=too-many-public-methods
class XenaPort(object):
"""
Xena Port emulator class
command = make_port_command(CMD_RESET, self)
return self._manager.driver.ask_verify(command)
- def set_port_arp_reply(self, on=True, v6=False):
+ def set_port_arp_reply(self, is_on=True, ipv6=False):
"""
Set the port ARP reply value
:param is_on: Enable or disable the ARP reply on the port
:return: Boolean True if response OK, False if error
"""
command = make_port_command('{} {}'.format(
- CMD_SET_PORT_ARP_V6_REPLY if v6 else CMD_SET_PORT_ARP_REPLY,
- "on" if on else "off"), self)
+ CMD_SET_PORT_ARP_V6_REPLY if ipv6 else CMD_SET_PORT_ARP_REPLY,
+ "on" if is_on else "off"), self)
return self._manager.driver.ask_verify(command)
- def set_port_ping_reply(self, on=True, v6=False):
+ def set_port_ping_reply(self, is_on=True, ipv6=False):
"""
Set the port ping reply value
:param is_on: Enable or disable the ping reply on the port
:return: Boolean True if response OK, False if error
"""
command = make_port_command('{} {}'.format(
- CMD_SET_PORT_PING_V6_REPLY if v6 else CMD_SET_PORT_PING_REPLY,
- "on" if on else "off"), self)
+ CMD_SET_PORT_PING_V6_REPLY if ipv6 else CMD_SET_PORT_PING_REPLY,
+ "on" if is_on else "off"), self)
return self._manager.driver.ask_verify(command)
def set_port_learning(self, interval):
"""
return self._time
+ # pylint: disable=too-many-branches
def parse_stats(self):
""" Parse the stats from pr all command
:return: Dictionary of all stats
def aggregate_stats(stat1, stat2):
"""
- Judge whether stat1 and stat2 both have same key, if both have same key,
+ Judge whether stat1 and stat2 both have the same key; if they do,
call the aggregate function, else use stat1's value
"""
newstat = dict()
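A minimal sketch of the merge described in that docstring; the per-value aggregate() helper is assumed and stubbed out as a simple sum here:
    def aggregate(value1, value2):
        return value1 + value2             # stand-in for the real per-stat aggregation

    def aggregate_stats_sketch(stat1, stat2):
        newstat = dict()
        for key in stat1:
            if key in stat2:
                newstat[key] = aggregate(stat1[key], stat2[key])   # key present on both sides
            else:
                newstat[key] = stat1[key]                          # otherwise keep stat1's value
        return newstat

    print(aggregate_stats_sketch({'packets': 10, 'errors': 1}, {'packets': 5}))
    # {'packets': 15, 'errors': 1}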
-# Copyright 2016 Red Hat Inc & Xena Networks.
+# Copyright 2016-2017 Red Hat Inc & Xena Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
"""
self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
'Enabled'] = 'true'
-
+ # pylint: disable=too-many-arguments
def modify_2544_tput_options(self, initial_value, minimum_value,
maximum_value, value_resolution,
use_pass_threshhold, pass_threshhold):
+ """
+ Modify the RFC 2544 throughput test options of the Xena JSON configuration.
+ """
self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
'RateIterationOptions']['InitialValue'] = initial_value
self.json_data['TestOptions']['TestTypeOptionMap']['Throughput'][
write_json_file(JSON.json_data, './testthis.x2544')
JSON = XenaJSON('./testthis.x2544')
print_json_report(JSON.json_data)
-
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
'tests': tests,
}
i = 0
+ # pylint: disable=no-member
for output_file in output_files:
template = template_env.get_template(_TEMPLATE_FILES[i])
- output_text = template.render(template_vars) #pylint: disable=no-member
+ output_text = template.render(template_vars)
with open(output_file, 'w') as file_:
file_.write(output_text)
logging.info('Test report written to "%s"', output_file)
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import subprocess
import locale
import re
+import distro
from conf import settings as S
:returns: Return distro name as a string
"""
- return ' '.join(platform.dist())
+ return ' '.join(distro.linux_distribution())
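platform.dist() has been deprecated since Python 3.5 (and removed in 3.8), which is why the distro package was added to the requirements earlier in this patch; minimal usage of the replacement call:
    import distro

    # returns a (name, version, codename) tuple, e.g. ('Ubuntu', '16.04', 'xenial')
    print(' '.join(distro.linux_distribution()))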
def get_kernel():
"""Get kernel version.
return None
# This function uses a long switch on purpose, so let us suppress the related pylint warnings
-# pylint: disable=R0912
+# pylint: disable=too-many-branches, too-many-statements
def get_version(app_name):
""" Get version of given application and its git tag
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
import select
import subprocess
import logging
-import pexpect
import threading
import sys
import os
import locale
import time
+import pexpect
from conf import settings
from tools import systeminfo
if msg:
logger.info(msg)
+ # pylint: disable=too-many-nested-blocks
logger.debug('%s%s', CMD_PREFIX, ' '.join(cmd))
-
try:
proc = subprocess.Popen(map(os.path.expanduser, cmd),
stdout=subprocess.PIPE,
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
"""Automation of QEMU hypervisor with direct access to host NICs via
PCI passthrough.
"""
-
import logging
import subprocess
-import os
from conf import settings as S
from vnfs.qemu.qemu import IVnfQemu
-# Copyright 2015 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
self.execute(cmd)
self.wait(prompt=prompt, timeout=timeout)
+ # pylint: disable=simplifiable-if-statement
def validate_start(self, dummy_result):
""" Validate call of VNF start()
"""
This method is static
"""
IVnf._number_vnfs = 0
-
#!/usr/bin/env python3
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
on how self.is_pass was set in the constructor"""
self.assertTrue(self.is_pass, self.msg)
-
+# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def main():
"""Main function.
"""
# configuration validity checks
if args['vswitch']:
- vswitch_none = 'none' == args['vswitch'].strip().lower()
+ vswitch_none = args['vswitch'].strip().lower() == 'none'
if vswitch_none:
settings.setValue('VSWITCH', 'none')
else:
sys.exit(1)
# run tests
+ # Add pylint exception: Redefinition of test type from
+ # testcases.integration.IntegrationTestCase to testcases.performance.PerformanceTestCase
+ # pylint: disable=redefined-variable-type
suite = unittest.TestSuite()
for cfg in selected_tests:
test_name = cfg.get('Name', '<Name not set>')
test = PerformanceTestCase(cfg)
test.run()
suite.addTest(MockTestCase('', True, test.name))
- #pylint: disable=broad-except
+ # pylint: disable=broad-except
except (Exception) as ex:
_LOGGER.exception("Failed to run test: %s", test_name)
suite.addTest(MockTestCase(str(ex), False, test_name))
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from conf import settings
from src.ovs import OFBridge, flow_key, flow_match
-from tools import tasks
from vswitches.vswitch import IVSwitch
-
+from tools import tasks
+from tools.module_manager import ModuleManager
+# pylint: disable=too-many-public-methods
class IVSwitchOvs(IVSwitch, tasks.Process):
"""Open vSwitch base class implementation
self._cmd = []
self._cmd_template = ['sudo', '-E', settings.getValue('TOOLS')['ovs-vswitchd']]
self._stamp = None
+ self._module_manager = ModuleManager()
def start(self):
""" Start ``ovsdb-server`` and ``ovs-vswitchd`` instance.
bridge = self._bridges[switch_name]
remote_bridge = self._bridges[remote_switch_name]
pcount = str(self._get_port_count('type=patch'))
- # TODO ::: What if interface name longer than allowed width??
+ # NOTE ::: What if the interface name is longer than the allowed width?
local_port_name = switch_name + '-' + remote_switch_name + '-' + pcount
remote_port_name = remote_switch_name + '-' + switch_name + '-' + pcount
local_params = ['--', 'set', 'Interface', local_port_name,
#
# validate methods required for integration testcases
#
-
- def validate_add_switch(self, result, switch_name, params=None):
+ def validate_add_switch(self, dummy_result, switch_name, dummy_params=None):
"""Validate - Create a new logical switch with no ports
"""
bridge = self._bridges[switch_name]
assert re.search('Bridge ["\']?%s["\']?' % switch_name, output[0]) is not None
return True
- def validate_del_switch(self, result, switch_name):
+ # Method could be a function
+ # pylint: disable=no-self-use
+ def validate_del_switch(self, dummy_result, switch_name):
"""Validate removal of switch
"""
bridge = OFBridge('tmp')
"""
return self.validate_add_phy_port(result, switch_name)
- def validate_del_port(self, result, switch_name, port_name):
+ def validate_del_port(self, dummy_result, switch_name, port_name):
""" Validate that port_name was removed from bridge.
"""
bridge = self._bridges[switch_name]
assert 'Port "%s"' % port_name not in output[0]
return True
- def validate_add_flow(self, result, switch_name, flow, cache='off'):
+ def validate_add_flow(self, dummy_result, switch_name, flow, dummy_cache='off'):
""" Validate insertion of the flow into the switch
"""
+
if 'idle_timeout' in flow:
- del(flow['idle_timeout'])
+ del flow['idle_timeout']
# Note: it should be possible to call `ovs-ofctl dump-flows switch flow`
# to verify flow insertion, but it doesn't accept the same flow syntax
return True
return False
- def validate_del_flow(self, result, switch_name, flow=None):
+ def validate_del_flow(self, dummy_result, switch_name, flow=None):
""" Validate removal of the flow
"""
if not flow:
# what else can we do?
return True
- return not self.validate_add_flow(result, switch_name, flow)
+ return not self.validate_add_flow(dummy_result, switch_name, flow)
- def validate_dump_flows(self, result, switch_name):
+ def validate_dump_flows(self, dummy_result, dummy_switch_name):
""" Validate call of flow dump
"""
return True
- def validate_disable_rstp(self, result, switch_name):
+ def validate_disable_rstp(self, dummy_result, switch_name):
""" Validate rstp disable
"""
bridge = self._bridges[switch_name]
return 'rstp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_rstp(self, result, switch_name):
+ def validate_enable_rstp(self, dummy_result, switch_name):
""" Validate rstp enable
"""
bridge = self._bridges[switch_name]
return 'rstp_enable : true' in ''.join(bridge.bridge_info())
- def validate_disable_stp(self, result, switch_name):
+ def validate_disable_stp(self, dummy_result, switch_name):
""" Validate stp disable
"""
bridge = self._bridges[switch_name]
return 'stp_enable : false' in ''.join(bridge.bridge_info())
- def validate_enable_stp(self, result, switch_name):
+ def validate_enable_stp(self, dummy_result, switch_name):
""" Validate stp enable
"""
bridge = self._bridges[switch_name]
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
port_name = 'dpdk' + str(dpdk_count)
# PCI info. Please note there must be no blank space, e.g. it must be
# like 'options:dpdk-devargs=0000:06:00.0'
- _NICS = settings.getValue('NICS')
- nic_pci = 'options:dpdk-devargs=' + _NICS[dpdk_count]['pci']
+ _nics = settings.getValue('NICS')
+ nic_pci = 'options:dpdk-devargs=' + _nics[dpdk_count]['pci']
params = ['--', 'set', 'Interface', port_name, 'type=dpdk', nic_pci]
# multi-queue enable
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
from conf import settings
from vswitches.ovs import IVSwitchOvs
from src.ovs import DPCtl
-from tools.module_manager import ModuleManager
from tools import tasks
class OvsVanilla(IVSwitchOvs):
self._logger = logging.getLogger(__name__)
self._vswitchd_args += ["unix:%s" % self.get_db_sock_path()]
self._vswitchd_args += settings.getValue('VSWITCHD_VANILLA_ARGS')
- self._module_manager = ModuleManager()
def stop(self):
"""See IVswitch for general description
self._logger.error("Can't add port! There are only %s ports "
"defined in config!", len(self._ports))
- raise
-
+ raise RuntimeError('Failed to add phy port')
if not self._ports[self._current_id]:
self._logger.error("Can't detect device name for NIC %s", self._current_id)
raise ValueError("Invalid device name for %s" % self._current_id)
bridge = self._bridges[switch_name]
of_port = bridge.add_port(tap_name, [])
return (tap_name, of_port)
-
-