NFVBENCH-40 Add pylint to tox 93/45093/2
author Kerim Gokarslan <kgokarsl@cisco.com>
Sat, 14 Oct 2017 00:29:58 +0000 (17:29 -0700)
committer Kerim Gokarslan <kgokarsl@cisco.com>
Mon, 16 Oct 2017 18:53:49 +0000 (11:53 -0700)
Change-Id: Ic55a07145f27c4cfaa6df5523df3940ca4433af1
Signed-off-by: Kerim Gokarslan <kgokarsl@cisco.com>
27 files changed:
nfvbench/chain_clients.py
nfvbench/chain_managers.py
nfvbench/chain_runner.py
nfvbench/compute.py
nfvbench/config.py
nfvbench/credentials.py
nfvbench/factory.py
nfvbench/fluentd.py
nfvbench/log.py
nfvbench/nfvbench.py
nfvbench/nfvbenchd.py
nfvbench/packet_analyzer.py
nfvbench/service_chain.py
nfvbench/specs.py
nfvbench/summarizer.py
nfvbench/traffic_client.py
nfvbench/traffic_gen/dummy.py
nfvbench/traffic_gen/traffic_base.py
nfvbench/traffic_gen/traffic_utils.py
nfvbench/traffic_gen/trex.py
nfvbench/traffic_server.py
nfvbench/utils.py
pylint.rc [new file with mode: 0644]
test-requirements.txt
test/__init__.py [new file with mode: 0644]
test/test_nfvbench.py
tox.ini
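
Note: the tox.ini and test-requirements.txt hunks are not shown on this page. As a rough
sketch of the kind of environment such a change adds -- the env name, targets and command
line below are illustrative assumptions, not copied from the actual hunks:

    [testenv:pylint]
    deps = -r{toxinidir}/test-requirements.txt
    commands = pylint --rcfile pylint.rc nfvbench test

With pylint listed in test-requirements.txt, the check can then be run locally with
"tox -e pylint" (again assuming that env name).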

diff --git a/nfvbench/chain_clients.py b/nfvbench/chain_clients.py
index ac95247..d9a39af 100644 (file)
 #    under the License.
 #
 
+import os
+import re
+import time
+
 import compute
-from glanceclient.v2 import client as glanceclient
 from log import LOG
+
+from glanceclient.v2 import client as glanceclient
 from neutronclient.neutron import client as neutronclient
 from novaclient.client import Client
-import os
-import re
-import time
 
 
 class StageClientException(Exception):
@@ -65,7 +67,8 @@ class BasicStageClient(object):
                     LOG.info('Created instance: %s', instance.name)
                 self.vms[i] = instance
                 setattr(self.vms[i], 'is_reuse', is_reuse)
-            if all(map(lambda instance: instance.status == 'ACTIVE', self.vms)):
+
+            if all(vm.status == 'ACTIVE' for vm in self.vms):
                 return
             time.sleep(self.config.generic_poll_sec)
         raise StageClientException('Timed out waiting for VMs to spawn')
@@ -139,7 +142,7 @@ class BasicStageClient(object):
         # add subnet id to the network dict since it has just been added
         network['subnets'] = [subnet['id']]
         network['is_reuse'] = False
-        LOG.info('Created network: %s.' % name)
+        LOG.info('Created network: %s.', name)
         return network
 
     def _create_port(self, net):
@@ -161,7 +164,7 @@ class BasicStageClient(object):
             except Exception:
                 retry += 1
                 time.sleep(self.config.generic_poll_sec)
-        LOG.error('Unable to delete port: %s' % (port['id']))
+        LOG.error('Unable to delete port: %s', port['id'])
 
     def __delete_net(self, network):
         retry = 0
@@ -172,7 +175,7 @@ class BasicStageClient(object):
             except Exception:
                 retry += 1
                 time.sleep(self.config.generic_poll_sec)
-        LOG.error('Unable to delete network: %s' % (network['name']))
+        LOG.error('Unable to delete network: %s', network['name'])
 
     def __get_server_az(self, server):
         availability_zone = getattr(server, 'OS-EXT-AZ:availability_zone', None)
@@ -185,7 +188,7 @@ class BasicStageClient(object):
 
     def _lookup_servers(self, name=None, nets=None, az=None, flavor_id=None):
         error_msg = 'VM with the same name, but non-matching {} found. Aborting.'
-        networks = set(map(lambda net: net['name'], nets)) if nets else None
+        networks = set([net['name'] for net in nets]) if nets else None
         server_list = self.comp.get_server_list()
         matching_servers = []
 
@@ -208,7 +211,7 @@ class BasicStageClient(object):
         return matching_servers
 
     def _create_server(self, name, ports, az, nfvbenchvm_config):
-        port_ids = map(lambda port: {'port-id': port['id']}, ports)
+        port_ids = [{'port-id': port['id']} for port in ports]
         nfvbenchvm_config_location = os.path.join('/etc/', self.nfvbenchvm_config_name)
         server = self.comp.create_server(name,
                                          self.image_instance,
@@ -222,7 +225,7 @@ class BasicStageClient(object):
                                          files={nfvbenchvm_config_location: nfvbenchvm_config})
         if server:
             setattr(server, 'is_reuse', False)
-            LOG.info('Creating instance: %s on %s' % (name, az))
+            LOG.info('Creating instance: %s on %s', name, az)
         else:
             raise StageClientException('Unable to create instance: %s.' % (name))
         return server
@@ -232,14 +235,14 @@ class BasicStageClient(object):
         if self.image_name:
             self.image_instance = self.comp.find_image(self.image_name)
         if self.image_instance:
-            LOG.info("Reusing image %s" % self.image_name)
+            LOG.info("Reusing image %s", self.image_name)
         else:
-            image_name_search_pattern = '(nfvbenchvm-\d+(\.\d+)*).qcow2'
+            image_name_search_pattern = r'(nfvbenchvm-\d+(\.\d+)*)\.qcow2'
             if self.config.vm_image_file:
                 match = re.search(image_name_search_pattern, self.config.vm_image_file)
                 if match:
                     self.image_name = match.group(1)
-                    LOG.info('Using provided VM image file %s' % self.config.vm_image_file)
+                    LOG.info('Using provided VM image file %s', self.config.vm_image_file)
                 else:
                     raise StageClientException('Provided VM image file name %s must start with '
                                                '"nfvbenchvm-<version>"' % self.config.vm_image_file)
@@ -249,15 +252,14 @@ class BasicStageClient(object):
                     if re.search(image_name_search_pattern, f):
                         self.config.vm_image_file = pkg_root + '/' + f
                         self.image_name = f.replace('.qcow2', '')
-                        LOG.info('Found built-in VM image file %s' % f)
+                        LOG.info('Found built-in VM image file %s', f)
                         break
                 else:
                     raise StageClientException('Cannot find any built-in VM image file.')
             if self.image_name:
                 self.image_instance = self.comp.find_image(self.image_name)
             if not self.image_instance:
-                LOG.info('Uploading %s'
-                         % self.image_name)
+                LOG.info('Uploading %s', self.image_name)
                 res = self.comp.upload_image_via_url(self.image_name,
                                                      self.config.vm_image_file)
 
@@ -265,7 +267,7 @@ class BasicStageClient(object):
                     raise StageClientException('Error uploading image %s from %s. ABORTING.'
                                                % (self.image_name,
                                                   self.config.vm_image_file))
-                LOG.info('Image %s successfully uploaded.' % self.image_name)
+                LOG.info('Image %s successfully uploaded.', self.image_name)
                 self.image_instance = self.comp.find_image(self.image_name)
 
         self.__setup_flavor()
@@ -285,7 +287,7 @@ class BasicStageClient(object):
                                                                  override=True,
                                                                  **flavor_dict)
 
-            LOG.info("Flavor '%s' was created." % self.config.flavor_type)
+            LOG.info("Flavor '%s' was created.", self.config.flavor_type)
 
             if extra_specs:
                 self.flavor_type['flavor'].set_keys(extra_specs)
@@ -298,10 +300,10 @@ class BasicStageClient(object):
 
     def __delete_flavor(self, flavor):
         if self.comp.delete_flavor(flavor=flavor):
-            LOG.info("Flavor '%s' deleted" % self.config.flavor_type)
+            LOG.info("Flavor '%s' deleted", self.config.flavor_type)
             self.flavor_type = {'is_reuse': False, 'flavor': None}
         else:
-            LOG.error('Unable to delete flavor: %s' % self.config.flavor_type)
+            LOG.error('Unable to delete flavor: %s', self.config.flavor_type)
 
     def get_config_file(self, chain_index, src_mac, dst_mac):
         boot_script_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
@@ -339,7 +341,7 @@ class BasicStageClient(object):
         """
         Disable security at port level.
         """
-        vm_ids = map(lambda vm: vm.id, self.vms)
+        vm_ids = [vm.id for vm in self.vms]
         for net in self.nets:
             for port in self.ports[net['id']]:
                 if port['device_id'] in vm_ids:
@@ -349,7 +351,7 @@ class BasicStageClient(object):
                             'port_security_enabled': False,
                         }
                     })
-                    LOG.info('Security disabled on port {}'.format(port['id']))
+                    LOG.info('Security disabled on port %s', port['id'])
 
     def get_loop_vm_hostnames(self):
         return [getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname') for vm in self.vms]
@@ -361,8 +363,7 @@ class BasicStageClient(object):
         if not self.host_ips:
             #  get the hypervisor object from the host name
             self.host_ips = [self.comp.get_hypervisor(
-                             getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname')).host_ip
-                             for vm in self.vms]
+                getattr(vm, 'OS-EXT-SRV-ATTR:hypervisor_hostname')).host_ip for vm in self.vms]
         return self.host_ips
 
     def get_loop_vm_compute_nodes(self):
@@ -378,11 +379,10 @@ class BasicStageClient(object):
                                        flavor_id=self.flavor_type['flavor'].id)
         if servers:
             server = servers[0]
-            LOG.info('Reusing existing server: ' + name)
+            LOG.info('Reusing existing server: %s', name)
             setattr(server, 'is_reuse', True)
             return server
-        else:
-            return None
+        return None
 
     def get_networks_uuids(self):
         """
@@ -400,7 +400,7 @@ class BasicStageClient(object):
         """
         vlans = []
         for net in self.nets:
-            assert (net['provider:network_type'] == 'vlan')
+            assert net['provider:network_type'] == 'vlan'
             vlans.append(net['provider:segmentation_id'])
 
         return vlans
@@ -421,7 +421,7 @@ class BasicStageClient(object):
                 if not getattr(vm, 'is_reuse', True):
                     self.comp.delete_server(vm)
                 else:
-                    LOG.info('Server %s not removed since it is reused' % vm.name)
+                    LOG.info('Server %s not removed since it is reused', vm.name)
 
         for port in self.created_ports:
             self.__delete_port(port)
@@ -431,16 +431,13 @@ class BasicStageClient(object):
                 if 'is_reuse' in net and not net['is_reuse']:
                     self.__delete_net(net)
                 else:
-                    LOG.info('Network %s not removed since it is reused' % (net['name']))
+                    LOG.info('Network %s not removed since it is reused', net['name'])
 
             if not self.flavor_type['is_reuse']:
                 self.__delete_flavor(self.flavor_type['flavor'])
 
 
 class EXTStageClient(BasicStageClient):
-    def __init__(self, config, cred):
-        super(EXTStageClient, self).__init__(config, cred)
-
     def setup(self):
         super(EXTStageClient, self).setup()
 
@@ -454,13 +451,10 @@ class EXTStageClient(BasicStageClient):
 
 
 class PVPStageClient(BasicStageClient):
-    def __init__(self, config, cred):
-        super(PVPStageClient, self).__init__(config, cred)
-
     def get_end_port_macs(self):
-        vm_ids = map(lambda vm: vm.id, self.vms)
+        vm_ids = [vm.id for vm in self.vms]
         port_macs = []
-        for index, net in enumerate(self.nets):
+        for net in self.nets:
             vm_mac_map = {port['device_id']: port['mac_address'] for port in self.ports[net['id']]}
             port_macs.append([vm_mac_map[vm_id] for vm_id in vm_ids])
         return port_macs
@@ -497,13 +491,10 @@ class PVPStageClient(BasicStageClient):
 
 
 class PVVPStageClient(BasicStageClient):
-    def __init__(self, config, cred):
-        super(PVVPStageClient, self).__init__(config, cred)
-
     def get_end_port_macs(self):
         port_macs = []
         for index, net in enumerate(self.nets[:2]):
-            vm_ids = map(lambda vm: vm.id, self.vms[index::2])
+            vm_ids = [vm.id for vm in self.vms[index::2]]
             vm_mac_map = {port['device_id']: port['mac_address'] for port in self.ports[net['id']]}
             port_macs.append([vm_mac_map[vm_id] for vm_id in vm_ids])
         return port_macs
diff --git a/nfvbench/chain_managers.py b/nfvbench/chain_managers.py
index 033eb7a..cbc53e2 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 #
+import time
 
 from log import LOG
 from network import Network
 from packet_analyzer import PacketAnalyzer
 from specs import ChainType
 from stats_collector import IntervalCollector
-import time
 
 
 class StageManager(object):
diff --git a/nfvbench/chain_runner.py b/nfvbench/chain_runner.py
index 2e222de..63cc48f 100644 (file)
 #    under the License.
 #
 
+import traceback
+
 from log import LOG
 from service_chain import ServiceChain
-import traceback
 from traffic_client import TrafficClient
 
 
diff --git a/nfvbench/compute.py b/nfvbench/compute.py
index 5806164..575744c 100644 (file)
 #    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 #    License for the specific language governing permissions and limitations
 #    under the License.
+"""Module for Openstack compute operations"""
+import os
+import time
+import traceback
 
-'''Module for Openstack compute operations'''
-from glanceclient import exc as glance_exception
 import keystoneauth1
 from log import LOG
 import novaclient
-import os
-import time
-import traceback
 
+from glanceclient import exc as glance_exception
 
 try:
     from glanceclient.openstack.common.apiclient.exceptions import NotFound as GlanceImageNotFound
@@ -29,7 +29,6 @@ except ImportError:
 
 
 class Compute(object):
-
     def __init__(self, nova_client, glance_client, neutron_client, config):
         self.novaclient = nova_client
         self.glance_client = glance_client
@@ -137,10 +136,9 @@ class Compute(object):
         '''
         if ssh_access.public_key_file:
             return self.add_public_key(kp_name, ssh_access.public_key_file)
-        else:
-            keypair = self.create_keypair(kp_name, None)
-            ssh_access.private_key = keypair.private_key
-            return keypair
+        keypair = self.create_keypair(kp_name, None)
+        ssh_access.private_key = keypair.private_key
+        return keypair
 
     def find_network(self, label):
         net = self.novaclient.networks.find(label=label)
@@ -290,7 +288,7 @@ class Compute(object):
                     if hyp.zone == zone:
                         # matches
                         return az_host
-                    # else continue - another zone with same host name?
+                        # else continue - another zone with same host name?
             # no match
             LOG.error('No match for availability zone and host ' + az_host)
             return None
@@ -375,16 +373,15 @@ class Compute(object):
             LOG.warning('Operation Forbidden: could not retrieve list of hypervisors'
                         ' (likely no permission)')
 
-        hypervisor_list = filter(lambda h: h.status == 'enabled' and h.state == 'up',
-                                 hypervisor_list)
+        hypervisor_list = [h for h in hypervisor_list if h.status == 'enabled' and h.state == 'up']
         if self.config.availability_zone:
-            host_list = filter(lambda h: h.zone == self.config.availability_zone, host_list)
+            host_list = [h for h in host_list if h.zone == self.config.availability_zone]
 
         if self.config.compute_nodes:
-            host_list = filter(lambda h: h.host in self.config.compute_nodes, host_list)
+            host_list = [h for h in host_list if h.host in self.config.compute_nodes]
 
         hosts = [h.hypervisor_hostname for h in hypervisor_list]
-        host_list = filter(lambda h: h.host in hosts, host_list)
+        host_list = [h for h in host_list if h.host in hosts]
 
         avail_list = []
         for host in host_list:
@@ -408,10 +405,7 @@ class Compute(object):
         try:
             server_instance_1 = self.novaclient.servers.get(vm_instance1)
             server_instance_2 = self.novaclient.servers.get(vm_instance2)
-            if server_instance_1.hostId == server_instance_2.hostId:
-                return True
-            else:
-                return False
+            return server_instance_1.hostId == server_instance_2.hostId
         except novaclient.exceptions:
             LOG.warning("Exception in retrieving the hostId of servers")
 
@@ -420,7 +414,7 @@ class Compute(object):
         # check first the security group exists
         sec_groups = self.neutronclient.list_security_groups()['security_groups']
         group = [x for x in sec_groups if x['name'] == self.config.security_group_name]
-        if len(group) > 0:
+        if group:
             return group[0]
 
         body = {
diff --git a/nfvbench/config.py b/nfvbench/config.py
index 2ed726d..8139389 100644 (file)
@@ -18,7 +18,7 @@ from log import LOG
 import yaml
 
 
-def config_load(file_name, from_cfg=None, whitelist_keys=[]):
+def config_load(file_name, from_cfg=None, whitelist_keys=None):
     """Load a yaml file into a config dict, merge with from_cfg if not None
     The config file content taking precedence in case of duplicate
     """
@@ -31,13 +31,15 @@ def config_load(file_name, from_cfg=None, whitelist_keys=[]):
                         .format(file_name))
 
     if from_cfg:
+        if not whitelist_keys:
+            whitelist_keys = []
         _validate_config(cfg, from_cfg, whitelist_keys)
         cfg = from_cfg + cfg
 
     return cfg
 
 
-def config_loads(cfg_text, from_cfg=None, whitelist_keys=[]):
+def config_loads(cfg_text, from_cfg=None, whitelist_keys=None):
     """Same as config_load but load from a string
     """
     try:
@@ -46,10 +48,13 @@ def config_loads(cfg_text, from_cfg=None, whitelist_keys=[]):
         # empty string
         cfg = AttrDict()
     if from_cfg:
+        if not whitelist_keys:
+            whitelist_keys = []
         _validate_config(cfg, from_cfg, whitelist_keys)
         return from_cfg + cfg
     return cfg
 
+
 def _validate_config(subset, superset, whitelist_keys):
     def get_err_config(subset, superset):
         result = {}
diff --git a/nfvbench/credentials.py b/nfvbench/credentials.py
index 0c8470e..62ea708 100644 (file)
 #    under the License.
 #
 
+import os
+import re
+
 # Module for credentials in Openstack
 import getpass
 from keystoneauth1.identity import v2
 from keystoneauth1.identity import v3
 from keystoneauth1 import session
-import os
-import re
-
 from log import LOG
 
 
diff --git a/nfvbench/factory.py b/nfvbench/factory.py
index 35a8c1b..1461036 100644 (file)
@@ -27,7 +27,6 @@ import tor_client
 
 
 class BasicFactory(object):
-
     chain_classes = [ChainType.EXT, ChainType.PVP, ChainType.PVVP]
 
     chain_stats_classes = {
diff --git a/nfvbench/fluentd.py b/nfvbench/fluentd.py
index a9bda62..16ff33e 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import logging
+
 from datetime import datetime
 from fluent import sender
-import logging
 import pytz
 
 
@@ -108,8 +109,7 @@ class FluentLogHandler(logging.Handler):
             return "GOOD RUN"
         elif highest_level == logging.WARNING:
             return "RUN WITH WARNINGS"
-        else:
-            return "RUN WITH ERRORS"
+        return "RUN WITH ERRORS"
 
     def __update_stats(self, levelno):
         if levelno == logging.WARNING:
diff --git a/nfvbench/log.py b/nfvbench/log.py
index 674ddf8..e55c230 100644 (file)
@@ -31,7 +31,10 @@ def setup(mute_stdout=False):
     # disable unnecessary information capture
     logging.logThreads = 0
     logging.logProcesses = 0
+    # to make sure each log record does not have a source file name attached
+    # pylint: disable=protected-access
     logging._srcfile = None
+    # pylint: enable=protected-access
 
 def add_file_logger(logfile):
     if logfile:
diff --git a/nfvbench/nfvbench.py b/nfvbench/nfvbench.py
index 52fb57b..24cbb72 100644 (file)
 #    under the License.
 #
 
-from __init__ import __version__
 import argparse
+from collections import defaultdict
+import copy
+import datetime
+import importlib
+import json
+import os
+import sys
+import traceback
+
 from attrdict import AttrDict
+import pbr.version
+from pkg_resources import resource_string
+
+from __init__ import __version__
 from chain_runner import ChainRunner
-from collections import defaultdict
 from config import config_load
 from config import config_loads
-import copy
-import credentials
-import datetime
+import credentials
 from factory import BasicFactory
 from fluentd import FluentLogHandler
-import importlib
-import json
 import log
 from log import LOG
 from nfvbenchd import WebSocketIoServer
-import os
-import pbr.version
-from pkg_resources import resource_string
 from specs import ChainType
 from specs import Specs
 from summarizer import NFVBenchSummarizer
-import sys
-import traceback
 from traffic_client import TrafficGeneratorFactory
 import utils
 
+
 fluent_logger = None
 
 
@@ -115,17 +118,16 @@ class NFVBench(object):
             if self.chain_runner:
                 self.chain_runner.close()
 
-            if status == NFVBench.STATUS_OK:
-                result = utils.dict_to_json_dict(result)
-                return {
-                    'status': status,
-                    'result': result
-                }
-            else:
-                return {
-                    'status': status,
-                    'error_message': message
-                }
+        if status == NFVBench.STATUS_OK:
+            result = utils.dict_to_json_dict(result)
+            return {
+                'status': status,
+                'result': result
+            }
+        return {
+            'status': status,
+            'error_message': message
+        }
 
     def prepare_summary(self, result):
         """Prepares summary of the result to print and send it to logger (eg: fluentd)"""
@@ -158,9 +160,9 @@ class NFVBench(object):
         self.config.flow_count = utils.parse_flow_count(self.config.flow_count)
         required_flow_count = self.config.service_chain_count * 2
         if self.config.flow_count < required_flow_count:
-            LOG.info("Flow count '{}' has been set to minimum value of '{}' "
-                     "for current configuration".format(self.config.flow_count,
-                                                        required_flow_count))
+            LOG.info("Flow count %d has been set to minimum value of '%d' "
+                     "for current configuration"self.config.flow_count,
+                     required_flow_count)
             self.config.flow_count = required_flow_count
 
         if self.config.flow_count % 2 != 0:
@@ -182,7 +184,7 @@ class NFVBench(object):
                             "({tg_profile}) are missing. Please specify them in configuration file."
                             .format(tg_profile=self.config.generator_profile))
 
-        if self.config.traffic is None or len(self.config.traffic) == 0:
+        if not self.config.traffic:
             raise Exception("No traffic profile found in traffic configuration, "
                             "please fill 'traffic' section in configuration file.")
 
@@ -209,7 +211,7 @@ class NFVBench(object):
 
         self.config.json_file = self.config.json if self.config.json else None
         if self.config.json_file:
-            (path, filename) = os.path.split(self.config.json)
+            (path, _filename) = os.path.split(self.config.json)
             if not os.path.exists(path):
                 raise Exception('Please provide existing path for storing results in JSON file. '
                                 'Path used: {path}'.format(path=path))
@@ -298,7 +300,6 @@ def parse_opts_from_cli():
                         action='store',
                         help='Traffic generator profile to use')
 
-
     parser.add_argument('-0', '--no-traffic', dest='no_traffic',
                         default=None,
                         action='store_true',
@@ -380,7 +381,7 @@ def parse_opts_from_cli():
     parser.add_argument('--log-file', '--logfile', dest='log_file',
                         action='store',
                         help='Filename for saving logs',
-                        metavar='<log_file>'),
+                        metavar='<log_file>')
 
     parser.add_argument('--user-label', '--userlabel', dest='user_label',
                         action='store',
@@ -527,12 +528,12 @@ def main():
         if config.log_file:
             log.add_file_logger(config.log_file)
 
-        nfvbench = NFVBench(config, openstack_spec, config_plugin, factory)
+        nfvbench_instance = NFVBench(config, openstack_spec, config_plugin, factory)
 
         if opts.server:
             if os.path.isdir(opts.server):
-                server = WebSocketIoServer(opts.server, nfvbench, fluent_logger)
-                nfvbench.set_notifier(server)
+                server = WebSocketIoServer(opts.server, nfvbench_instance, fluent_logger)
+                nfvbench_instance.set_notifier(server)
                 try:
                     port = int(opts.port)
                 except ValueError:
@@ -554,13 +555,13 @@ def main():
                 opts = {k: v for k, v in vars(opts).iteritems() if v is not None}
                 # get CLI args
                 params = ' '.join(str(e) for e in sys.argv[1:])
-                result = nfvbench.run(opts, params)
+                result = nfvbench_instance.run(opts, params)
                 if 'error_message' in result:
                     raise Exception(result['error_message'])
 
                 if 'result' in result and result['status']:
-                    nfvbench.save(result['result'])
-                    nfvbench.prepare_summary(result['result'])
+                    nfvbench_instance.save(result['result'])
+                    nfvbench_instance.prepare_summary(result['result'])
     except Exception as exc:
         run_summary_required = True
         LOG.error({
diff --git a/nfvbench/nfvbenchd.py b/nfvbench/nfvbenchd.py
index 1e096ae..15b71c5 100644 (file)
 #    under the License.
 #
 
+import json
+import Queue
+import traceback
+import uuid
+
 from flask import Flask
 from flask import jsonify
 from flask import render_template
@@ -24,13 +29,10 @@ from flask_socketio import SocketIO
 from fluentd import FluentLogHandler
 from summarizer import NFVBenchSummarizer
 
-import json
 from log import LOG
-import Queue
-import traceback
 from utils import byteify
 from utils import RunLock
-import uuid
+
 
 # this global cannot reside in Ctx because of the @app and @socketio decorators
 app = None
@@ -144,29 +146,29 @@ def setup_flask(root_path):
     # --------- socketio requests ------------
 
     @socketio.on('start_run')
-    def socketio_start_run(config):
+    def _socketio_start_run(config):
         if not Ctx.is_busy():
             Ctx.enqueue(config, get_uuid(), from_socketio=True)
         else:
             emit('error', {'reason': 'there is already an NFVbench request running'})
 
     @socketio.on('echo')
-    def socketio_echo(config):
+    def _socketio_echo(config):
         emit('echo', config)
 
     # --------- HTTP requests ------------
 
     @app.route('/')
-    def index():
+    def _index():
         return render_template('index.html')
 
     @app.route('/echo', methods=['GET'])
-    def echo():
+    def _echo():
         config = request.json
         return jsonify(config)
 
     @app.route('/start_run', methods=['POST'])
-    def start_run():
+    def _start_run():
         config = load_json(request.json)
         if not config:
             config = {}
@@ -178,7 +180,7 @@ def setup_flask(root_path):
 
     @app.route('/status', defaults={'request_id': None}, methods=['GET'])
     @app.route('/status/<request_id>', methods=['GET'])
-    def get_status(request_id):
+    def _get_status(request_id):
         if request_id:
             if Ctx.is_busy() and request_id == Ctx.get_current_request_id():
                 # task with request_id still pending
@@ -188,9 +190,8 @@ def setup_flask(root_path):
             if res:
                 # found result for given request_id
                 return jsonify(res)
-            else:
-                # result for given request_id not found
-                return jsonify(result_json(STATUS_NOT_FOUND, not_found_msg, request_id))
+            # result for given request_id not found
+            return jsonify(result_json(STATUS_NOT_FOUND, not_found_msg, request_id))
         else:
             if Ctx.is_busy():
                 # task still pending, return with request_id
@@ -201,8 +202,7 @@ def setup_flask(root_path):
             res = Ctx.get_result()
             if res:
                 return jsonify(res)
-            else:
-                return jsonify(not_busy_json)
+            return jsonify(not_busy_json)
 
 
 class WebSocketIoServer(object):
diff --git a/nfvbench/packet_analyzer.py b/nfvbench/packet_analyzer.py
index c01675b..5d72bc9 100644 (file)
@@ -47,8 +47,8 @@ class PacketAnalyzer(object):
         transmitted_packets = self.chain[0]['packet_count']
 
         for (index, path_data) in enumerate(self.chain):
-            LOG.info('[Packet Analyze] Interface: %s' % (path_data['interface']))
-            LOG.info('[Packet Analyze]            > Count: %d' % (path_data['packet_count']))
+            LOG.info('[Packet Analyze] Interface: %s', path_data['interface'])
+            LOG.info('[Packet Analyze]            > Count: %d', path_data['packet_count'])
 
             if index:
                 if transmitted_packets:
@@ -56,9 +56,9 @@ class PacketAnalyzer(object):
                         100.0 * path_data['packet_drop_count'] / transmitted_packets
                 else:
                     self.chain[index]['packet_drop_percentage'] = float('nan')
-                LOG.info('[Packet Analyze]            > Packet Drops: %d' %
-                         (path_data['packet_drop_count']))
-                LOG.info('[Packet Analyze]            > Percentage: %s' %
-                         (path_data['packet_drop_percentage']))
+                LOG.info('[Packet Analyze]            > Packet Drops: %d',
+                         path_data['packet_drop_count'])
+                LOG.info('[Packet Analyze]            > Percentage: %s',
+                         path_data['packet_drop_percentage'])
 
         return self.chain
diff --git a/nfvbench/service_chain.py b/nfvbench/service_chain.py
index 0a5fd13..216cc92 100644 (file)
 #    under the License.
 #
 
-from chain_managers import StageManager
 from collections import OrderedDict
+import time
+
+from chain_managers import StageManager
 from log import LOG
 from specs import ChainType
-import time
 
 
 class ServiceChain(object):
@@ -113,8 +114,8 @@ class ServiceChain(object):
             self.clients['traffic'].ensure_end_to_end()
 
     def run(self):
-        LOG.info('Starting {} chain...'.format(self.chain_name))
-        LOG.info('Dry run: {}'.format(self.config.no_traffic))
+        LOG.info('Starting %s chain...', self.chain_name)
+        LOG.info('Dry run: %s', self.config.no_traffic)
         results = {}
 
         self.__set_helpers()
@@ -127,7 +128,7 @@ class ServiceChain(object):
             results[self.chain_name]['mode'] = 'inter-node' \
                 if self.config.inter_node else 'intra-node'
 
-        LOG.info("Service chain '{}' run completed.".format(self.chain_name))
+        LOG.info("Service chain '%s' run completed.", self.chain_name)
         return results
 
     def get_version(self):
diff --git a/nfvbench/specs.py b/nfvbench/specs.py
index 3f93df6..7a24d5c 100644 (file)
@@ -30,23 +30,22 @@ class Encaps(object):
 
 
 class ChainType(object):
-        PVP = "PVP"
-        PVVP = "PVVP"
-        EXT = "EXT"
-
-        chain_mapping = {
-            'PVP': PVP,
-            'PVVP': PVVP,
-            'EXT': EXT
-        }
+    PVP = "PVP"
+    PVVP = "PVVP"
+    EXT = "EXT"
+
+    chain_mapping = {
+        'PVP': PVP,
+        'PVVP': PVVP,
+        'EXT': EXT
+    }
 
-        @classmethod
-        def get_chain_type(cls, chain):
-            return cls.chain_mapping.get(chain.upper(), None)
+    @classmethod
+    def get_chain_type(cls, chain):
+        return cls.chain_mapping.get(chain.upper(), None)
 
 
 class OpenStackSpec(object):
-
     def __init__(self):
         self.__vswitch = "BASIC"
         self.__encaps = Encaps.BASIC
@@ -75,13 +74,11 @@ class OpenStackSpec(object):
 
 
 class RunSpec(object):
-
     def __init__(self, no_vswitch_access, openstack_spec):
         self.use_vswitch = (not no_vswitch_access) and openstack_spec.vswitch != "BASIC"
 
 
 class Specs(object):
-
     def __init__(self):
         self.openstack = None
         self.run_spec = None
diff --git a/nfvbench/summarizer.py b/nfvbench/summarizer.py
index 19ee9cb..70ad389 100644 (file)
 #    under the License.
 #
 
-import bitmath
 from contextlib import contextmanager
 from datetime import datetime
 import math
+
+import bitmath
 import pytz
 from specs import ChainType
 from tabulate import tabulate
@@ -40,12 +41,11 @@ class Formatter(object):
 
     @staticmethod
     def standard(data):
-        if type(data) == int:
+        if isinstance(data, int):
             return Formatter.int(data)
-        elif type(data) == float:
+        elif isinstance(data, float):
             return Formatter.float(4)(data)
-        else:
-            return Formatter.fixed(data)
+        return Formatter.fixed(data)
 
     @staticmethod
     def suffix(suffix_str):
@@ -70,8 +70,7 @@ class Formatter(object):
         bps = byte_to_bit_classes.get(bit.unit, bitmath.Bit).from_other(bit) / 8.0
         if bps.unit != 'Bit':
             return bps.format("{value:.4f} {unit}ps")
-        else:
-            return bps.format("{value:.4f} bps")
+        return bps.format("{value:.4f} bps")
 
     @staticmethod
     def percentage(data):
@@ -79,8 +78,7 @@ class Formatter(object):
             return ''
         elif math.isnan(data):
             return '-'
-        else:
-            return Formatter.suffix('%')(Formatter.float(4)(data))
+        return Formatter.suffix('%')(Formatter.float(4)(data))
 
 
 class Table(object):
@@ -92,7 +90,7 @@ class Table(object):
         self.columns = len(header_row)
 
     def add_row(self, row):
-        assert (self.columns == len(row))
+        assert self.columns == len(row)
         formatted_row = []
         for entry, formatter in zip(row, self.formatters):
             formatted_row.append(formatter(entry))
@@ -123,7 +121,7 @@ class Summarizer(object):
         self.marker_stack.append(marker)
 
     def __unindent(self):
-        assert (self.indent_size >= self.indent_per_level)
+        assert self.indent_size >= self.indent_per_level
         self.indent_size -= self.indent_per_level
         self.marker_stack.pop()
 
@@ -135,7 +133,7 @@ class Summarizer(object):
 
     def _put(self, *args):
         self.str += self.__get_indent_string()
-        if len(args) and type(args[-1]) == dict:
+        if args and isinstance(args[-1], dict):
             self.str += ' '.join(map(str, args[:-1])) + '\n'
             self._put_dict(args[-1])
         else:
@@ -144,7 +142,7 @@ class Summarizer(object):
     def _put_dict(self, data):
         with self._create_block(False):
             for key, value in data.iteritems():
-                if type(value) == dict:
+                if isinstance(value, dict):
                     self._put(key + ':')
                     self._put_dict(value)
                 else:
@@ -472,8 +470,7 @@ class NFVBenchSummarizer(Summarizer):
     def __record_send(self):
         if self.sender:
             self.record_header["@timestamp"] = datetime.utcnow().replace(
-                tzinfo=pytz.utc).strftime(
-                "%Y-%m-%dT%H:%M:%S.%f%z")
+                tzinfo=pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
             for frame_size in self.record_data:
                 data = self.record_header
                 data['frame_size'] = frame_size
diff --git a/nfvbench/traffic_client.py b/nfvbench/traffic_client.py
index 7aa557a..a1c4954 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from datetime import datetime
+import socket
+import struct
+import time
+
 from attrdict import AttrDict
 import bitmath
-from datetime import datetime
-from log import LOG
 from netaddr import IPNetwork
+# pylint: disable=import-error
+from trex_stl_lib.api import STLError
+# pylint: enable=import-error
+
+from log import LOG
 from network import Interface
-import socket
 from specs import ChainType
 from stats_collector import IntervalCollector
 from stats_collector import IterationCollector
-import struct
-import time
 import traffic_gen.traffic_utils as utils
-from trex_stl_lib.api import STLError
 from utils import cast_integer
 
 
@@ -34,7 +38,6 @@ class TrafficClientException(Exception):
 
 
 class TrafficRunner(object):
-
     def __init__(self, client, duration_sec, interval_sec=0):
         self.client = client
         self.start_time = None
@@ -59,8 +62,7 @@ class TrafficRunner(object):
     def time_elapsed(self):
         if self.is_running():
             return time.time() - self.start_time
-        else:
-            return self.duration_sec
+        return self.duration_sec
 
     def poll_stats(self):
         if not self.is_running():
@@ -81,6 +83,7 @@ class TrafficRunner(object):
             self.stop()
         return self.client.get_stats()
 
+
 class IpBlock(object):
     def __init__(self, base_ip, step_ip, count_ip):
         self.base_ip_int = Device.ip_to_int(base_ip)
@@ -100,9 +103,9 @@ class IpBlock(object):
         '''
         if self.next_free + count > self.max_available:
-            raise IndexError('No more IP addresses next free=%d max_available=%d requested=%d',
-                              self.next_free,
-                              self.max_available,
-                              count)
+            raise IndexError('No more IP addresses next free=%d max_available=%d requested=%d' %
+                             (self.next_free, self.max_available, count))
         first_ip = self.get_ip(self.next_free)
         last_ip = self.get_ip(self.next_free + count - 1)
         self.next_free += count
@@ -111,8 +114,8 @@ class IpBlock(object):
     def reset_reservation(self):
         self.next_free = 0
 
-class Device(object):
 
+class Device(object):
     def __init__(self, port, pci, switch_port=None, vtep_vlan=None, ip=None, tg_gateway_ip=None,
                  gateway_ip=None, ip_addrs_step=None, tg_gateway_ip_addrs_step=None,
                  gateway_ip_addrs_step=None, udp_src_port=None, udp_dst_port=None,
@@ -139,10 +142,10 @@ class Device(object):
         self.ip_block = IpBlock(self.ip, ip_addrs_step, flow_count)
         self.gw_ip_block = IpBlock(gateway_ip,
                                    gateway_ip_addrs_step,
-                                   chain_count) 
+                                   chain_count)
         self.tg_gw_ip_block = IpBlock(tg_gateway_ip,
                                       tg_gateway_ip_addrs_step,
-                                      chain_count) 
+                                      chain_count)
         self.udp_src_port = udp_src_port
         self.udp_dst_port = udp_dst_port
 
@@ -172,9 +175,9 @@ class Device(object):
         # exact flow count for each chain is calculated as follows:
         # - all chains except the first will have the same flow count
         #   calculated as (total_flows + chain_count - 1) / chain_count
-        # - the first chain will have the remainder  
+        # - the first chain will have the remainder
         # example 11 flows and 3 chains => 3, 4, 4
-        flows_per_chain = (self.flow_count + self.chain_count -1) / self.chain_count
+        flows_per_chain = (self.flow_count + self.chain_count - 1) / self.chain_count
         cur_chain_flow_count = self.flow_count - flows_per_chain * (self.chain_count - 1)
 
         self.ip_block.reset_reservation()
@@ -186,8 +189,8 @@ class Device(object):
             configs.append({
                 'count': cur_chain_flow_count,
                 'mac_src': self.mac,
-                'mac_dst': self.dst.mac if service_chain == ChainType.EXT
-                else self.vm_mac_list[chain_idx],
+                'mac_dst': (self.dst.mac if service_chain == ChainType.EXT
+                            else self.vm_mac_list[chain_idx]),
                 'ip_src_addr': src_ip_first,
                 'ip_src_addr_max': src_ip_last,
                 'ip_src_count': cur_chain_flow_count,
@@ -234,6 +237,7 @@ class Device(object):
     def int_to_ip(nvalue):
         return socket.inet_ntoa(struct.pack("!I", nvalue))
 
+
 class RunningTrafficProfile(object):
     """Represents traffic configuration for currently running traffic profile."""
 
@@ -277,8 +281,8 @@ class RunningTrafficProfile(object):
         generator_config = AttrDict(traffic_generator)
         generator_config.pop('default_profile')
         generator_config.pop('generator_profile')
-        matching_profile = filter(lambda profile: profile.name == generator_profile,
-                                  traffic_generator.generator_profile)
+        matching_profile = [profile for profile in traffic_generator.generator_profile if
+                            profile.name == generator_profile]
         if len(matching_profile) != 1:
             raise Exception('Traffic generator profile not found: ' + generator_profile)
 
@@ -348,7 +352,6 @@ class RunningTrafficProfile(object):
 
 
 class TrafficGeneratorFactory(object):
-
     def __init__(self, config):
         self.config = config
 
@@ -363,8 +366,7 @@ class TrafficGeneratorFactory(object):
         elif tool == 'dummy':
             from traffic_gen import dummy
             return dummy.DummyTG(self.config)
-        else:
-            return None
+        return None
 
     def list_generator_profile(self):
         return [profile.name for profile in self.config.traffic_generator.generator_profile]
@@ -373,12 +375,12 @@ class TrafficGeneratorFactory(object):
         return RunningTrafficProfile(self.config, generator_profile)
 
     def get_matching_profile(self, traffic_profile_name):
-        matching_profile = filter(lambda profile: profile.name == traffic_profile_name,
-                                  self.config.traffic_profile)
+        matching_profile = [profile for profile in self.config.traffic_profile if
+                            profile.name == traffic_profile_name]
 
         if len(matching_profile) > 1:
             raise Exception('Multiple traffic profiles with the same name found.')
-        elif len(matching_profile) == 0:
+        elif not matching_profile:
             raise Exception('No traffic profile found.')
 
         return matching_profile[0]
@@ -389,7 +391,6 @@ class TrafficGeneratorFactory(object):
 
 
 class TrafficClient(object):
-
     PORTS = [0, 1]
 
     def __init__(self, config, notifier=None):
@@ -408,7 +409,7 @@ class TrafficClient(object):
             'l2frame_size': None,
             'duration_sec': self.config.duration_sec,
             'bidirectional': True,
-            'rates': None
+            'rates': []  # to avoid unsubscriptable-object warning
         }
         self.current_total_rate = {'rate_percent': '10'}
         if self.config.single_run:
@@ -459,8 +460,7 @@ class TrafficClient(object):
         for it in xrange(retry_count):
             self.gen.clear_stats()
             self.gen.start_traffic()
-            LOG.info('Waiting for packets to be received back... ({} / {})'.format(it + 1,
-                     retry_count))
+            LOG.info('Waiting for packets to be received back... (%d / %d)', it + 1, retry_count)
             time.sleep(self.config.generic_poll_sec)
             self.gen.stop_traffic()
             stats = self.gen.get_stats()
@@ -553,8 +553,7 @@ class TrafficClient(object):
         total_pkts = result['tx']['total_pkts']
         if not total_pkts:
             return float('inf')
-        else:
-            return float(dropped_pkts) / total_pkts * 100
+        return float(dropped_pkts) / total_pkts * 100
 
     def get_stats(self):
         stats = self.gen.get_stats()
@@ -621,7 +620,7 @@ class TrafficClient(object):
         self.interval_collector.add_ndr_pdr(tag, last_stats)
 
     def __format_output_stats(self, stats):
-        for key in (self.PORTS + ['overall']):
+        for key in self.PORTS + ['overall']:
             interface = stats[key]
             stats[key] = {
                 'tx_pkts': interface['tx']['total_pkts'],
@@ -637,7 +636,7 @@ class TrafficClient(object):
 
     def __targets_found(self, rate, targets, results):
         for tag, target in targets.iteritems():
-            LOG.info('Found {} ({}) load: {}'.format(tag, target, rate))
+            LOG.info('Found %s (%s) load: %s', tag, target, rate)
             self.__ndr_pdr_found(tag, rate)
             results[tag]['timestamp_sec'] = time.time()
 
@@ -652,9 +651,9 @@ class TrafficClient(object):
                 ('ndr', 'pdr')
         results a dict to store results
         '''
-        if len(targets) == 0:
+        if not targets:
             return
-        LOG.info('Range search [{} .. {}] targets: {}'.format(left, right, targets))
+        LOG.info('Range search [%s .. %s] targets: %s', left, right, targets)
 
         # Terminate search when gap is less than load epsilon
         if right - left < self.config.measurement.load_epsilon:
@@ -733,7 +732,7 @@ class TrafficClient(object):
 
         # save reliable stats from whole iteration
         self.iteration_collector.add(stats, current_traffic_config['direction-total']['rate_pps'])
-        LOG.info('Average drop rate: {}'.format(stats['overall']['drop_rate_percent']))
+        LOG.info('Average drop rate: %f', stats['overall']['drop_rate_percent'])
 
         return stats, current_traffic_config['direction-total']
 
@@ -761,7 +760,7 @@ class TrafficClient(object):
             if stats is None:
                 return
         self.log_stats(stats)
-        LOG.info('Drop rate: {}'.format(stats['overall']['drop_rate_percent']))
+        LOG.info('Drop rate: %f', stats['overall']['drop_rate_percent'])
         yield stats
 
     def cancel_traffic(self):
@@ -817,7 +816,8 @@ class TrafficClient(object):
         for direction in ['orig', 'tx', 'rx']:
             total[direction] = {}
             for unit in ['rate_percent', 'rate_bps', 'rate_pps']:
-                total[direction][unit] = sum(map(lambda x: float(x[direction][unit]), r.values()))
+                total[direction][unit] = sum([float(x[direction][unit]) for x in r.values()])
 
         r['direction-total'] = total
         return r
diff --git a/nfvbench/traffic_gen/dummy.py b/nfvbench/traffic_gen/dummy.py
index dabdc71..d8c01e9 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from nfvbench.log import LOG
-
 from traffic_base import AbstractTrafficGenerator
-import traffic_utils as utils
-
 
 
 class DummyTG(AbstractTrafficGenerator):
@@ -50,12 +46,6 @@ class DummyTG(AbstractTrafficGenerator):
     def create_traffic(self, l2frame_size, rates, bidirectional, latency=True):
         pass
 
-    def modify_rate(self, rate, reverse):
-        port_index = int(reverse)
-        port = self.port_handle[port_index]
-        self.rates[port_index] = utils.to_rate_str(rate)
-        LOG.info('Modified traffic stream for %s, new rate=%s.' % (port, utils.to_rate_str(rate)))
-
     def clear_streamblock(self):
         pass
 
diff --git a/nfvbench/traffic_gen/traffic_base.py b/nfvbench/traffic_gen/traffic_base.py
index 064b2a2..568fae2 100644 (file)
 
 import abc
 
+from nfvbench.log import LOG
+import traffic_utils
+
+
 class TrafficGeneratorException(Exception):
     pass
 
 
 class AbstractTrafficGenerator(object):
-
     # src_mac (6) + dst_mac (6) + mac_type (2) + frame_check (4) = 18
     l2_header_size = 18
 
     imix_l2_sizes = [64, 594, 1518]
     imix_l3_sizes = [size - l2_header_size for size in imix_l2_sizes]
     imix_ratios = [7, 4, 1]
-    imix_avg_l2_size = sum(map(
-        lambda imix: 1.0 * imix[0] * imix[1],
-        zip(imix_l2_sizes, imix_ratios))) / sum(imix_ratios)
+
+    imix_avg_l2_size = sum(
+        [1.0 * size * ratio for size, ratio in zip(imix_l2_sizes, imix_ratios)]) / sum(imix_ratios)
+
+    traffic_utils.imix_avg_l2_size = imix_avg_l2_size
 
     def __init__(self, config):
         self.config = config
 
     @abc.abstractmethod
-    def get_version():
+    def get_version(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def init():
+    def init(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def connect():
+    def connect(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def config_interface():
+    def config_interface(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def create_traffic():
+    def create_traffic(self):
         # Must be implemented by sub classes
         return None
 
-    @abc.abstractmethod
-    def modify_traffic():
+    def modify_rate(self, rate, reverse):
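+        # shared implementation, hoisted out of the TRex and dummy generators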
+        port_index = int(reverse)
+        port = self.port_handle[port_index]
+        self.rates[port_index] = traffic_utils.to_rate_str(rate)
+        LOG.info('Modified traffic stream for %s, new rate=%s.', port,
+                 traffic_utils.to_rate_str(rate))
+
-        # Must be implemented by sub classes
+    def modify_traffic(self):
+        # no-op by default, sub classes may override
         return None
 
     @abc.abstractmethod
-    def get_stats():
+    def get_stats(self):
         # Must be implemented by sub classes
         return None
 
-    @abc.abstractmethod
-    def clear_traffic():
-        # Must be implemented by sub classes
+    def clear_traffic(self):
+        # no-op by default, sub classes may override
         return None
 
     @abc.abstractmethod
-    def start_traffic():
+    def start_traffic(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def stop_traffic():
+    def stop_traffic(self):
         # Must be implemented by sub classes
         return None
 
     @abc.abstractmethod
-    def cleanup():
+    def cleanup(self):
         # Must be implemented by sub classes
         return None
diff --git a/nfvbench/traffic_gen/traffic_utils.py b/nfvbench/traffic_gen/traffic_utils.py
index 7cf44a8..e618c28 100644 (file)
@@ -14,8 +14,9 @@
 
 
 import bitmath
-from traffic_base import AbstractTrafficGenerator
+from nfvbench.utils import multiplier_map
 
+# average IMIX L2 frame size, set by AbstractTrafficGenerator at class-definition time
+imix_avg_l2_size = None
 
 
 def convert_rates(l2frame_size, rate, intf_speed):
@@ -48,9 +49,8 @@ def convert_rates(l2frame_size, rate, intf_speed):
 
 def get_average_packet_size(l2frame_size):
     if l2frame_size.upper() == 'IMIX':
-        return AbstractTrafficGenerator.imix_avg_l2_size
-    else:
-        return float(l2frame_size)
+        return imix_avg_l2_size
+    return float(l2frame_size)
 
 
 def load_to_bps(load_percentage, intf_speed):
@@ -71,15 +71,9 @@ def pps_to_bps(pps, avg_packet_size):
 
 def weighted_avg(weight, count):
     if sum(weight):
-        return sum(map(lambda x: x[0] * x[1], zip(weight, count))) / sum(weight)
-    else:
-        return float('nan')
+        return sum([x[0] * x[1] for x in zip(weight, count)]) / sum(weight)
+    return float('nan')
 
-multiplier_map = {
-    'K': 1000,
-    'M': 1000000,
-    'G': 1000000000
-}
 def parse_rate_str(rate_str):
     if rate_str.endswith('pps'):
diff --git a/nfvbench/traffic_gen/trex.py b/nfvbench/traffic_gen/trex.py
index 498ff50..23faebc 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
+import random
+import time
+import traceback
+
 from collections import defaultdict
 from itertools import count
 from nfvbench.log import LOG
@@ -20,14 +25,11 @@ from nfvbench.traffic_server import TRexTrafficServer
 from nfvbench.utils import cast_integer
 from nfvbench.utils import timeout
 from nfvbench.utils import TimeoutError
-import os
-import random
-import time
-import traceback
 from traffic_base import AbstractTrafficGenerator
 from traffic_base import TrafficGeneratorException
 import traffic_utils as utils
 
+# pylint: disable=import-error
 from trex_stl_lib.api import CTRexVmInsFixHwCs
 from trex_stl_lib.api import Dot1Q
 from trex_stl_lib.api import Ether
@@ -46,6 +48,7 @@ from trex_stl_lib.api import STLVmFlowVarRepetableRandom
 from trex_stl_lib.api import STLVmWrFlowVar
 from trex_stl_lib.api import UDP
 from trex_stl_lib.services.trex_stl_service_arp import STLServiceARP
+# pylint: enable=import-error
 
 
 class TRex(AbstractTrafficGenerator):
@@ -95,8 +98,7 @@ class TRex(AbstractTrafficGenerator):
             result[ph]['rx']['min_delay_usec'] = cast_integer(
                 lat['total_min']) if 'total_min' in lat else float('nan')
             result[ph]['rx']['avg_delay_usec'] = cast_integer(
-                lat['average']) if 'average' in lat else float(
-                'nan')
+                lat['average']) if 'average' in lat else float('nan')
         total_tx_pkts = result[0]['tx']['total_pkts'] + result[1]['tx']['total_pkts']
         result["total_tx_rate"] = cast_integer(total_tx_pkts / self.config.duration_sec)
         return result
@@ -117,7 +119,7 @@ class TRex(AbstractTrafficGenerator):
 
     def __combine_latencies(self, in_stats, port_handle):
         """Traverses TRex result dictionary and combines chosen latency stats."""
-        if not len(self.latencies[port_handle]):
+        if not self.latencies[port_handle]:
             return {}
 
         result = defaultdict(float)
@@ -268,7 +270,7 @@ class TRex(AbstractTrafficGenerator):
                     if os.path.isfile(logpath):
                         # Wait for TRex to finish writing error message
                         last_size = 0
-                        for it in xrange(self.config.generic_retry_count):
+                        for _ in xrange(self.config.generic_retry_count):
                             size = os.path.getsize(logpath)
                             if size == last_size:
                                 # probably not writing anymore
@@ -347,13 +349,13 @@ class TRex(AbstractTrafficGenerator):
 
                 if len(self.arps[port]) == self.config.service_chain_count:
                     resolved += 1
-                    LOG.info('ARP resolved successfully for port {}'.format(port))
+                    LOG.info('ARP resolved successfully for port %s', port)
                     break
                 else:
                     failed = [arp.get_record().dst_ip for arp in arps
                               if arp.get_record().dst_mac is None]
-                    LOG.info('Retrying ARP for: {} ({} / {})'.format(
-                        failed, attempt, self.config.generic_retry_count))
+                    LOG.info('Retrying ARP for: %s (%d / %d)',
+                             failed, attempt, self.config.generic_retry_count)
                     time.sleep(self.config.generic_poll_sec)
 
         self.client.set_service_mode(ports=self.port_handle, enabled=False)
@@ -396,7 +398,7 @@ class TRex(AbstractTrafficGenerator):
 
         stream_cfgs = [d.get_stream_configs(self.config.generator_config.service_chain)
                        for d in self.config.generator_config.devices]
-        self.rates = map(lambda rate: utils.to_rate_str(rate), rates)
+        self.rates = [utils.to_rate_str(rate) for rate in rates]
 
         for ph in self.port_handle:
             # generate one pg_id for each direction
@@ -420,13 +422,7 @@ class TRex(AbstractTrafficGenerator):
 
         for ph in self.port_handle:
             self.client.add_streams(self.streamblock[ph], ports=ph)
-            LOG.info('Created traffic stream for port %s.' % ph)
-
-    def modify_rate(self, rate, reverse):
-        port_index = int(reverse)
-        port = self.port_handle[port_index]
-        self.rates[port_index] = utils.to_rate_str(rate)
-        LOG.info('Modified traffic stream for %s, new rate=%s.' % (port, utils.to_rate_str(rate)))
+            LOG.info('Created traffic stream for port %s.', ph)
 
     def clear_streamblock(self):
         self.streamblock = defaultdict(list)
diff --git a/nfvbench/traffic_server.py b/nfvbench/traffic_server.py
index 05f20e5..b35e78b 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from log import LOG
 import os
 import subprocess
 import yaml
 
+from log import LOG
+
+
 class TrafficServerException(Exception):
     pass
 
@@ -29,7 +31,7 @@ class TRexTrafficServer(TrafficServer):
     def __init__(self, trex_base_dir='/opt/trex'):
         contents = os.listdir(trex_base_dir)
         # only one version of TRex should be supported in container
-        assert(len(contents) == 1)
+        assert len(contents) == 1
         self.trex_dir = os.path.join(trex_base_dir, contents[0])
 
     def run_server(self, traffic_profile, filename='/etc/trex_cfg.yaml'):
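(Dropping the parentheses clears pylint's superfluous-parens check, C0325, and avoids a classic pitfall: once a message is added, "assert(cond, msg)" asserts a two-element tuple, which is always truthy. An illustration with hypothetical values:)

    contents = []
    assert (len(contents) == 1, 'expected one TRex dir')  # always passes!
    assert len(contents) == 1, 'expected one TRex dir'    # fails as intended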
index fc72517..412dfae 100644 (file)
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import errno
-import fcntl
-from functools import wraps
-import json
-from log import LOG
 from math import isnan
 import os
 import re
 import signal
 import subprocess
 
+import errno
+import fcntl
+from functools import wraps
+import json
+from log import LOG
+
 
 class TimeoutError(Exception):
     pass
@@ -30,7 +31,7 @@ class TimeoutError(Exception):
 
 def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
     def decorator(func):
-        def _handle_timeout(signum, frame):
+        def _handle_timeout(_signum, _frame):
             raise TimeoutError(error_message)
 
         def wrapper(*args, **kwargs):
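(Only the renamed handler appears in this hunk; for reference, a sketch of how such a signal-based timeout decorator typically fits together. It is Unix-only and main-thread-only, since it relies on SIGALRM:)

    import errno
    import os
    import signal
    from functools import wraps

    class TimeoutError(Exception):
        pass

    def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
        def decorator(func):
            def _handle_timeout(_signum, _frame):
                raise TimeoutError(error_message)

            @wraps(func)
            def wrapper(*args, **kwargs):
                signal.signal(signal.SIGALRM, _handle_timeout)
                signal.alarm(seconds)       # arm the alarm
                try:
                    return func(*args, **kwargs)
                finally:
                    signal.alarm(0)         # always cancel the pending alarm
            return wrapper
        return decorator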
@@ -110,7 +111,7 @@ def get_intel_pci(nic_ports):
     for driver in ['i40e', 'ixgbe']:
         matches = re.findall(regex.format(hx=hx, driver=driver), devices)
         if matches:
-            pcis = map(lambda x: x[0], matches)
+            pcis = [x[0] for x in matches]
             if len(pcis) < 2:
                 continue
             pcis.sort()
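(Rewrites from map(lambda ...) to a comprehension satisfy pylint's deprecated-lambda check, W0110, and keep the result an explicit list: under Python 3, map returns an iterator, which has no .sort() method. An illustration with made-up PCI addresses:)

    matches = [('0000:5e:00.0', 'i40e'), ('0000:5e:00.1', 'i40e')]
    pcis = [x[0] for x in matches]  # a real list on both Python 2 and 3
    pcis.sort()                     # safe; a py3 map object would fail here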
diff --git a/pylint.rc b/pylint.rc
new file mode 100644 (file)
index 0000000..adcd2b3
--- /dev/null
+++ b/pylint.rc
@@ -0,0 +1,425 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Specify a configuration file.
+#rcfile=
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=unused-argument,global-statement,too-many-statements,too-many-arguments,too-many-branches,catching-non-exception,relative-import,too-many-locals,invalid-name,broad-except,print-statement,parameter-unpacking,unpacking-in-except,old-raise-syntax,backtick,long-suffix,old-ne-operator,import-star-module-level,raw-checker-failed,locally-disabled,locally-enabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,apply-builtin,basestring-builtin,buffer-builtin,cmp-builtin,coerce-builtin,execfile-builtin,file-builtin,long-builtin,raw_input-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,no-absolute-import,old-division,dict-iter-method,dict-view-method,next-method-called,metaclass-assignment,indexing-exception,raising-string,reload-builtin,oct-method,hex-method,nonzero-method,cmp-method,input-builtin,round-builtin,intern-builtin,unichr-builtin,map-builtin-not-iterating,zip-builtin-not-iterating,range-builtin-not-iterating,filter-builtin-not-iterating,using-cmp-argument,eq-without-hash,div-method,idiv-method,rdiv-method,exception-message-attribute,invalid-str-codec,sys-max-int,bad-python3-import,deprecated-string-function,deprecated-str-translate-call,missing-docstring,redefined-builtin,no-name-in-module,no-self-use,no-member,arguments-differ,attribute-defined-outside-init,non-iterator-returned,eval-used,unexpected-keyword-arg
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by a comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=
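(Messages in the disable list above are silenced globally; anything else can still be toggled locally with pragma comments, the mechanism the updated test module relies on around its fixtures. A minimal sketch:)

    import pytest

    # pylint: disable=redefined-outer-name
    @pytest.fixture
    def iface1():
        return 'iface1'

    def test_iface(iface1):       # fixture arg shadows the function above
        assert iface1 == 'iface1'
    # pylint: enable=redefined-outer-name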
+
+
+[REPORTS]
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors, warning, statement, which
+# respectively contain the number of error / warning messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[BASIC]
+
+# Naming hint for argument names
+argument-name-hint=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct argument names
+argument-rgx=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Naming hint for attribute names
+attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct attribute names
+attr-rgx=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,50}|(__.*__))$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for constant names
+const-name-hint=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Za-z_][A-Za-z0-9_]*)|(__.*__))$
+
+# Minimum line length for functions/classes that require docstrings; shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming hint for function names
+function-name-hint=(([a-z][a-zA-Z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct function names
+function-rgx=(([a-z][a-zA-Z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for method names
+method-name-hint=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct method names
+method-rgx=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty
+
+# Naming hint for variable names
+variable-name-hint=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+# Regular expression matching correct variable names
+variable-rgx=(([a-z][a-z0-9_]{2,50})|(_[a-z0-9_]*))$
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1  : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the indicated private dictionary in
+# the --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Defaults to names
+# with a leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,future.builtins
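(The dummy-variables-rgx and ignored-argument-names patterns above are what let the underscore renames earlier in this change, such as "for _ in xrange(...)" and "_handle_timeout(_signum, _frame)", pass cleanly: a leading underscore marks a value as intentionally unused. For instance:)

    for _ in range(3):                     # counter never read: no warning
        print('tick')

    def on_alarm(_signum, _frame):         # required signature, args unused
        raise RuntimeError('timed out')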
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=12
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=32
+
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
+
+# Maximum number of branch for function / method body
+max-branches=30
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=40
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+
+[IMPORTS]
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
index f72ef01..18f4952 100644 (file)
@@ -15,3 +15,4 @@ testrepository>=0.0.18
 testscenarios>=0.4
 testtools>=1.4.0
 pytest>=3.2.2
+pylint>=1.7.1
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
index 55632ab..85342bb 100644 (file)
 #    under the License.
 #
 
-from attrdict import AttrDict
 import logging
+import os
+import sys
+
+from attrdict import AttrDict
 from nfvbench.config import config_loads
 from nfvbench.credentials import Credentials
 from nfvbench.fluentd import FluentLogHandler
@@ -25,29 +28,26 @@ from nfvbench.network import Network
 from nfvbench.specs import ChainType
 from nfvbench.specs import Encaps
 import nfvbench.traffic_gen.traffic_utils as traffic_utils
-import os
 import pytest
-import sys
 
 __location__ = os.path.realpath(os.path.join(os.getcwd(),
                                              os.path.dirname(__file__)))
 
 
-
 @pytest.fixture
 def openstack_vxlan_spec():
     return AttrDict(
         {
             'openstack': AttrDict({
                 'vswitch': "VTS",
-                'encaps': Encaps.VxLAN}
-            ),
+                'encaps': Encaps.VxLAN}),
             'run_spec': AttrDict({
                 'use_vpp': True
             })
         }
     )
 
+
 # =========================================================================
 # PVP Chain tests
 # =========================================================================
@@ -61,6 +61,7 @@ def test_chain_interface():
     assert iface.get_packet_count('wrong_key') == 0
 
 
+# pylint: disable=redefined-outer-name
 @pytest.fixture(scope='session')
 def iface1():
     return Interface('iface1', 'trex', tx_packets=10000, rx_packets=1234)
@@ -98,6 +99,9 @@ def test_chain_network(net1, net2, iface1, iface2, iface3, iface4):
     assert [iface4, iface3, iface2, iface1] == net2.get_interfaces()
 
 
+# pylint: enable=redefined-outer-name
+
+# pylint: disable=pointless-string-statement
 """
 def test_chain_analysis(net1, monkeypatch, openstack_vxlan_spec):
     def mock_empty(self, *args, **kwargs):
@@ -262,7 +266,6 @@ def test_pvp_chain_run(pvp_chain):
     assert result == expected_result
 """
 
-
 # =========================================================================
 # PVVP Chain tests
 # =========================================================================
@@ -439,6 +442,7 @@ def test_pvvp_chain_run(pvvp_chain):
     assert result == expected_result
 """
 
+
 # =========================================================================
 # Traffic client tests
 # =========================================================================
@@ -478,6 +482,7 @@ def test_parse_rate_str():
     assert should_raise_error('0pps')
     assert should_raise_error('-1bps')
 
+
 def test_rate_conversion():
     assert traffic_utils.load_to_bps(50, 10000000000) == pytest.approx(5000000000.0)
     assert traffic_utils.load_to_bps(37, 10000000000) == pytest.approx(3700000000.0)
@@ -616,6 +621,9 @@ def test_ndr_pdr_search(traffic_client):
     assert results == expected_results
 """
 
+
+# pylint: enable=pointless-string-statement
+
 # =========================================================================
 # Other tests
 # =========================================================================
@@ -623,6 +631,7 @@ def test_ndr_pdr_search(traffic_client):
 def setup_module(module):
     nfvbench.log.setup(mute_stdout=True)
 
+
 def test_no_credentials():
     cred = Credentials('/completely/wrong/path/openrc', None, False)
     if cred.rc_auth_url:
@@ -631,18 +640,23 @@ def test_no_credentials():
     else:
         assert True
 
+
 # Because trex_stl_lib may not be installed when running unit test
 # nfvbench.traffic_client will try to import STLError:
 # from trex_stl_lib.api import STLError
 # will raise ImportError: No module named trex_stl_lib.api
 try:
     import trex_stl_lib.api
-    assert(trex_stl_lib.api)
+
+    assert trex_stl_lib.api
 except ImportError:
     # Make up a trex_stl_lib.api.STLError class
     class STLError(Exception):
         pass
+
+
     from types import ModuleType
+
     stl_lib_mod = ModuleType('trex_stl_lib')
     sys.modules['trex_stl_lib'] = stl_lib_mod
     api_mod = ModuleType('trex_stl_lib.api')
@@ -650,25 +664,29 @@ except ImportError:
     sys.modules['trex_stl_lib.api'] = api_mod
     api_mod.STLError = STLError
 
+# pylint: disable=wrong-import-position,ungrouped-imports
 from nfvbench.traffic_client import Device
 from nfvbench.traffic_client import IpBlock
 
 
+# pylint: enable=wrong-import-position,ungrouped-imports
+
 def test_ip_block():
     ipb = IpBlock('10.0.0.0', '0.0.0.1', 256)
-    assert(ipb.get_ip() == '10.0.0.0')
-    assert(ipb.get_ip(255) == '10.0.0.255')
+    assert ipb.get_ip() == '10.0.0.0'
+    assert ipb.get_ip(255) == '10.0.0.255'
     with pytest.raises(IndexError):
         ipb.get_ip(256)
     # verify with step larger than 1
     ipb = IpBlock('10.0.0.0', '0.0.0.2', 256)
-    assert(ipb.get_ip() == '10.0.0.0')
-    assert(ipb.get_ip(1) == '10.0.0.2')
-    assert(ipb.get_ip(128) == '10.0.1.0')
-    assert(ipb.get_ip(255) == '10.0.1.254')
+    assert ipb.get_ip() == '10.0.0.0'
+    assert ipb.get_ip(1) == '10.0.0.2'
+    assert ipb.get_ip(128) == '10.0.1.0'
+    assert ipb.get_ip(255) == '10.0.1.254'
     with pytest.raises(IndexError):
         ipb.get_ip(256)
 
+
 def check_config(configs, cc, fc, src_ip, dst_ip, step_ip):
     '''Verify that the range configs for each chain have adjacent IP ranges
     of the right size and without holes between chains
@@ -679,14 +697,15 @@ def check_config(configs, cc, fc, src_ip, dst_ip, step_ip):
     dip = Device.ip_to_int(dst_ip)
     for index in range(cc):
         config = configs[index]
-        assert(config['ip_src_count'] == config['ip_dst_count'])
-        assert(Device.ip_to_int(config['ip_src_addr']) == sip)
-        assert(Device.ip_to_int(config['ip_dst_addr']) == dip)
+        assert config['ip_src_count'] == config['ip_dst_count']
+        assert Device.ip_to_int(config['ip_src_addr']) == sip
+        assert Device.ip_to_int(config['ip_dst_addr']) == dip
         count = config['ip_src_count']
         cfc += count
         sip += count * step
         dip += count * step
-    assert(cfc == fc)
+    assert cfc == fc
+
 
 def create_device(fc, cc, ip, gip, tggip, step_ip):
     return Device(0, 0, flow_count=fc, chain_count=cc, ip=ip, gateway_ip=gip, tg_gateway_ip=tggip,
@@ -694,6 +713,7 @@ def create_device(fc, cc, ip, gip, tggip, step_ip):
                   tg_gateway_ip_addrs_step=step_ip,
                   gateway_ip_addrs_step=step_ip)
 
+
 def check_device_flow_config(step_ip):
     fc = 99999
     cc = 10
@@ -707,10 +727,12 @@ def check_device_flow_config(step_ip):
     configs = dev0.get_stream_configs(ChainType.EXT)
     check_config(configs, cc, fc, ip0, ip1, step_ip)
 
+
 def test_device_flow_config():
     check_device_flow_config('0.0.0.1')
     check_device_flow_config('0.0.0.2')
 
+
 def test_device_ip_range():
     def ip_range_overlaps(ip0, ip1, flows):
         tggip = '50.0.0.0'
@@ -719,10 +741,11 @@ def test_device_ip_range():
         dev1 = create_device(flows, 10, ip1, gip, tggip, '0.0.0.1')
         dev0.set_destination(dev1)
         return dev0.ip_range_overlaps()
-    assert(not ip_range_overlaps('10.0.0.0', '20.0.0.0', 10000))
-    assert(ip_range_overlaps('10.0.0.0', '10.0.1.0', 10000))
-    assert(ip_range_overlaps('10.0.0.0', '10.0.1.0', 257))
-    assert(ip_range_overlaps('10.0.1.0', '10.0.0.0', 257))
+
+    assert not ip_range_overlaps('10.0.0.0', '20.0.0.0', 10000)
+    assert ip_range_overlaps('10.0.0.0', '10.0.1.0', 10000)
+    assert ip_range_overlaps('10.0.0.0', '10.0.1.0', 257)
+    assert ip_range_overlaps('10.0.1.0', '10.0.0.0', 257)
 
 
 def test_config():
@@ -730,10 +753,10 @@ def test_config():
     res1 = {1: 10, 2: {21: 100, 22: 200}, 3: None}
     res2 = {1: 100, 2: {21: 1000, 22: 200}, 3: None}
     res3 = {1: 100, 2: {21: 100, 22: 200}, 3: "abc"}
-    assert(config_loads("{}", refcfg) == refcfg)
-    assert(config_loads("{1: 10}", refcfg) == res1)
-    assert(config_loads("{2: {21: 1000}}", refcfg) == res2)
-    assert(config_loads('{3: "abc"}', refcfg) == res3)
+    assert config_loads("{}", refcfg) == refcfg
+    assert config_loads("{1: 10}", refcfg) == res1
+    assert config_loads("{2: {21: 1000}}", refcfg) == res2
+    assert config_loads('{3: "abc"}', refcfg) == res3
 
     # correctly fails
     # pairs of input string and expected subset (None if identical)
@@ -754,11 +777,12 @@ def test_config():
 
     # whitelist keys
     flavor = {'flavor': {'vcpus': 2, 'ram': 8192, 'disk': 0,
-              'extra_specs': {'hw:cpu_policy': 'dedicated'}}}
+                         'extra_specs': {'hw:cpu_policy': 'dedicated'}}}
     new_flavor = {'flavor': {'vcpus': 2, 'ram': 8192, 'disk': 0,
-                  'extra_specs': {'hw:cpu_policy': 'dedicated', 'hw:numa_nodes': 2}}}
-    assert(config_loads("{'flavor': {'extra_specs': {'hw:numa_nodes': 2}}}", flavor,
-                        whitelist_keys=['alpha', 'extra_specs']) == new_flavor)
+                             'extra_specs': {'hw:cpu_policy': 'dedicated', 'hw:numa_nodes': 2}}}
+    assert config_loads("{'flavor': {'extra_specs': {'hw:numa_nodes': 2}}}", flavor,
+                        whitelist_keys=['alpha', 'extra_specs']) == new_flavor
+
 
 def test_fluentd():
     logger = logging.getLogger('fluent-logger')
diff --git a/tox.ini b/tox.ini
index ea0894e..1dab8a7 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
 [tox]
 minversion = 1.6
-envlist = py27,pep8
+envlist = py27,pep8,lint
 skipsdist = True
 
 [testenv]
@@ -15,6 +15,9 @@ commands = py.test -q -s --basetemp={envtmpdir} {posargs}
 [testenv:pep8]
 commands = flake8 {toxinidir}
 
+[testenv:lint]
+commands = pylint --rcfile pylint.rc nfvbench test
+
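With this environment defined, the linter can be run on its own with "tox -e lint"; since lint is also in envlist above, it runs as part of the default tox invocation as well.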
 [testenv:venv]
 commands = {posargs}