autopep8 fix for flake8 93/22593/1
author yuyang <Gabriel.yuyang@huawei.com>
Fri, 30 Sep 2016 18:21:36 +0000 (02:21 +0800)
committer yuyang <Gabriel.yuyang@huawei.com>
Fri, 30 Sep 2016 18:23:53 +0000 (02:23 +0800)
JIRA: BOTTLENECK-101

Use autopep8 to fix the Python style violations reported by flake8
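The exact invocation is not recorded in this change; a minimal sketch of the
intended workflow, assuming the standard flake8 and autopep8 CLIs and the
testsuites/ and utils/ trees listed below:

    # list the style violations flake8 currently reports
    flake8 testsuites/ utils/
    # let autopep8 rewrite the offending files in place
    autopep8 --in-place --recursive testsuites/ utils/
    # re-run flake8 to confirm the violations are resolved
    flake8 testsuites/ utils/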

Change-Id: I74bf28ed4d999dac3dd36e9101f099c9853a49b6
Signed-off-by: yuyang <Gabriel.yuyang@huawei.com>
135 files changed:
testsuites/rubbos/run_rubbos.py
testsuites/vstf/run_vstf.py
testsuites/vstf/vstf_collector.py
testsuites/vstf/vstf_scripts/vstf/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/agent.py
testsuites/vstf/vstf_scripts/vstf/agent/env/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/collect.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/commandline.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/device_manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/image_manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/source_manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm9pfs.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/basic/vm_xml_help.py
testsuites/vstf/vstf_scripts/vstf/agent/env/builder.py
testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/model.py
testsuites/vstf/vstf_scripts/vstf/agent/env/driver_plugins/origin_driver.py
testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/FSMonitor.py
testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/constant.py
testsuites/vstf/vstf_scripts/vstf/agent/env/fsmonitor/utils.py
testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/libvirt_plugin.py
testsuites/vstf/vstf_scripts/vstf/agent/env/plugins/tester_env_plugin.py
testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/bridge_plugin.py
testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/manager.py
testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/model.py
testsuites/vstf/vstf_scripts/vstf/agent/env/vswitch_plugins/ovs_plugin.py
testsuites/vstf/vstf_scripts/vstf/agent/equalizer/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/equalizer/equalizer.py
testsuites/vstf/vstf_scripts/vstf/agent/equalizer/get_info.py
testsuites/vstf/vstf_scripts/vstf/agent/equalizer/optimize.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/__init__.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/affctl.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/iperf.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/netmap.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/netns.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/netperf.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/pktgen.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/qperf.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/sar.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/utils.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/vnstat.py
testsuites/vstf/vstf_scripts/vstf/agent/perf/vstfperf.py
testsuites/vstf/vstf_scripts/vstf/agent/softagent.py
testsuites/vstf/vstf_scripts/vstf/agent/spirent/spirent.py
testsuites/vstf/vstf_scripts/vstf/agent/spirent/tools.py
testsuites/vstf/vstf_scripts/vstf/agent/spirentagent.py
testsuites/vstf/vstf_scripts/vstf/common/__init__.py
testsuites/vstf/vstf_scripts/vstf/common/candy_text.py
testsuites/vstf/vstf_scripts/vstf/common/cfgparser.py
testsuites/vstf/vstf_scripts/vstf/common/cliutil.py
testsuites/vstf/vstf_scripts/vstf/common/cmds.py
testsuites/vstf/vstf_scripts/vstf/common/daemon.py
testsuites/vstf/vstf_scripts/vstf/common/decorator.py
testsuites/vstf/vstf_scripts/vstf/common/log.py
testsuites/vstf/vstf_scripts/vstf/common/pyhtml.py
testsuites/vstf/vstf_scripts/vstf/common/rsync.py
testsuites/vstf/vstf_scripts/vstf/common/saltstack.py
testsuites/vstf/vstf_scripts/vstf/common/ssh.py
testsuites/vstf/vstf_scripts/vstf/common/test_func.py
testsuites/vstf/vstf_scripts/vstf/common/unix.py
testsuites/vstf/vstf_scripts/vstf/common/utils.py
testsuites/vstf/vstf_scripts/vstf/common/vstfcli.py
testsuites/vstf/vstf_scripts/vstf/controller/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/api_server.py
testsuites/vstf/vstf_scripts/vstf/controller/database/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/database/dbinterface.py
testsuites/vstf/vstf_scripts/vstf/controller/database/tables.py
testsuites/vstf/vstf_scripts/vstf/controller/env_build/cfg_intent_parse.py
testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_build.py
testsuites/vstf/vstf_scripts/vstf/controller/env_build/env_collect.py
testsuites/vstf/vstf_scripts/vstf/controller/fabricant.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/mail.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/mail/sendmail.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/candy_generator.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/data_factory.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/html_base.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/html/htmlcreator.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/element.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdfcreator.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/pdftemplate.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/pdf/story.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/html_provider.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/report/provider/pdf_provider.py
testsuites/vstf/vstf_scripts/vstf/controller/reporters/reporter.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/cpu_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/device_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/flows_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/forwarding_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/html_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/mail_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/perf_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/template_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/tester_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/settings/tool_settings.py
testsuites/vstf/vstf_scripts/vstf/controller/spirent/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/spirent/appliance.py
testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/model.py
testsuites/vstf/vstf_scripts/vstf/controller/spirent/common/result_analysis.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/flow_producer.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/model.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/perf_provider.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/performance.py
testsuites/vstf/vstf_scripts/vstf/controller/sw_perf/raw_data.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/__init__.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/configuration.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/model.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_cfg_intent_parse.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_collect.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_driver_function.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_env_build.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_perf.py
testsuites/vstf/vstf_scripts/vstf/controller/unittest/test_ssh.py
testsuites/vstf/vstf_scripts/vstf/controller/vstfadm.py
testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/__init__.py
testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_consumer.py
testsuites/vstf/vstf_scripts/vstf/rpc_frame_work/rpc_producer.py
utils/dashboard/process_data.py
utils/dashboard/rubbos_collector.py
utils/dashboard/uploader.py
utils/dispatcher/func.py
utils/infra_setup/heat/common.py
utils/infra_setup/heat/consts/files.py
utils/infra_setup/heat/manager.py
utils/infra_setup/heat/template.py
utils/infra_setup/heat/tests/generate_template_test.py

index 455b3e5..63b9ae0 100755 (executable)
@@ -24,9 +24,11 @@ from novaclient.client import Client as NovaClient
 # parser for configuration files in each test case
 # ------------------------------------------------------
 parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--conf",
-                    help="configuration files for the testcase, in yaml format",
-                    default="/home/opnfv/bottlenecks/testsuites/rubbos/testcase_cfg/rubbos_basic.yaml")
+parser.add_argument(
+    "-c",
+    "--conf",
+    help="configuration files for the testcase, in yaml format",
+    default="/home/opnfv/bottlenecks/testsuites/rubbos/testcase_cfg/rubbos_basic.yaml")
 args = parser.parse_args()
 
 #--------------------------------------------------
@@ -37,31 +39,40 @@ logger = logging.getLogger(__name__)
 
 def _get_keystone_client():
     keystone_client = KeystoneClient(
-                auth_url=os.environ.get('OS_AUTH_URL'),
-                username=os.environ.get('OS_USERNAME'),
-                password=os.environ.get('OS_PASSWORD'),
-                tenant_name=os.environ.get('OS_TENANT_NAME'),
-                cacert=os.environ.get('OS_CACERT'))
+        auth_url=os.environ.get('OS_AUTH_URL'),
+        username=os.environ.get('OS_USERNAME'),
+        password=os.environ.get('OS_PASSWORD'),
+        tenant_name=os.environ.get('OS_TENANT_NAME'),
+        cacert=os.environ.get('OS_CACERT'))
     return keystone_client
 
+
 def _get_heat_client():
     keystone = _get_keystone_client()
-    heat_endpoint = keystone.service_catalog.url_for(service_type='orchestration')
-    heat_client = HeatClient('1', endpoint=heat_endpoint, token=keystone.auth_token)
+    heat_endpoint = keystone.service_catalog.url_for(
+        service_type='orchestration')
+    heat_client = HeatClient(
+        '1',
+        endpoint=heat_endpoint,
+        token=keystone.auth_token)
     return heat_client
 
+
 def _get_glance_client():
     keystone = _get_keystone_client()
-    glance_endpoint = keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
+    glance_endpoint = keystone.service_catalog.url_for(
+        service_type='image', endpoint_type='publicURL')
     return GlanceClient(glance_endpoint, token=keystone.auth_token)
 
+
 def _get_nova_client():
     nova_client = NovaClient("2", os.environ.get('OS_USERNAME'),
-                                  os.environ.get('OS_PASSWORD'),
-                                  os.environ.get('OS_TENANT_NAME'),
-                                  os.environ.get('OS_AUTH_URL'))
+                             os.environ.get('OS_PASSWORD'),
+                             os.environ.get('OS_TENANT_NAME'),
+                             os.environ.get('OS_AUTH_URL'))
     return nova_client
 
+
 def _download_url(src_url, dest_dir):
     ''' Download a file to a destination path given a URL'''
     file_name = src_url.rsplit('/')[-1]
@@ -76,21 +87,27 @@ def _download_url(src_url, dest_dir):
     return dest
 
 
-def rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status="CREATE_COMPLETE"):
+def rubbos_stack_satisfy(
+        name="bottlenecks_rubbos_stack",
+        status="CREATE_COMPLETE"):
     heat = _get_heat_client()
     for stack in heat.stacks.list():
-        if status == None and stack.stack_name == name:
+        if status is None and stack.stack_name == name:
             # Found target stack
             print "Found stack, name=" + str(stack.stack_name)
             return True
-        elif stack.stack_name == name and stack.stack_status==status:
+        elif stack.stack_name == name and stack.stack_status == status:
             print "Found stack, name=" + str(stack.stack_name) + ", status=" + str(stack.stack_status)
             return True
     return False
 
+
 def rubbos_env_prepare(template=None):
     print "========== Prepare rubbos environment =========="
-    logger.info("Generate heat template for the testcase based on template '%s'." % template)
+    logger.info(
+        "Generate heat template for the testcase based on template '%s'." %
+        template)
+
 
 def rubbos_env_cleanup():
     print "========== Cleanup rubbos environment =========="
@@ -119,25 +136,33 @@ def rubbos_env_cleanup():
             heat.stacks.delete(stack.id)
 
     timeInProgress = 0
-    while rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None) and timeInProgress < 60:
+    while rubbos_stack_satisfy(
+            name="bottlenecks_rubbos_stack",
+            status=None) and timeInProgress < 60:
         time.sleep(5)
         timeInProgress = timeInProgress + 5
 
-    if rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None) == True:
+    if rubbos_stack_satisfy(name="bottlenecks_rubbos_stack", status=None):
         print "Failed to clean the stack"
         return False
     else:
         return True
 
-def rubbos_create_images(imagefile=None, image_name="bottlenecks_rubbos_image"):
+
+def rubbos_create_images(
+        imagefile=None,
+        image_name="bottlenecks_rubbos_image"):
     print "========== Create rubbos image in OS =========="
 
-    if imagefile == None:
-       print "imagefile not set/found"
-       return False
+    if imagefile is None:
+        print "imagefile not set/found"
+        return False
 
     glance = _get_glance_client()
-    image = glance.images.create(name=image_name, disk_format="qcow2", container_format="bare")
+    image = glance.images.create(
+        name=image_name,
+        disk_format="qcow2",
+        container_format="bare")
     with open(imagefile) as fimage:
         glance.images.upload(image.id, fimage)
 
@@ -149,50 +174,65 @@ def rubbos_create_images(imagefile=None, image_name="bottlenecks_rubbos_image"):
         timeInQueue = timeInQueue + 1
         img_status = glance.images.get(image.id).status
 
-    print "After %d seconds, the image's status is [%s]" %(timeInQueue, img_status)
+    print "After %d seconds, the image's status is [%s]" % (timeInQueue, img_status)
     return True if img_status == "active" else False
 
+
 def rubbos_create_keypairs(key_path, name="bottlenecks_rubbos_keypair"):
     print "========== Add rubbos keypairs in OS =========="
     nova = _get_nova_client()
     with open(key_path) as pkey:
         nova.keypairs.create(name=name, public_key=pkey.read())
 
-def rubbos_create_flavors(name="bottlenecks_rubbos_flavor", ram=4096, vcpus=2, disk=10):
+
+def rubbos_create_flavors(
+        name="bottlenecks_rubbos_flavor",
+        ram=4096,
+        vcpus=2,
+        disk=10):
     print "========== Create rubbos flavors in OS =========="
     nova = _get_nova_client()
     nova.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk)
 
-def rubbos_create_instances(template_file, rubbos_parameters=None, stack_name="bottlenecks_rubbos_stack"):
+
+def rubbos_create_instances(
+        template_file,
+        rubbos_parameters=None,
+        stack_name="bottlenecks_rubbos_stack"):
     print "========== Create rubbos instances =========="
     heat = _get_heat_client()
 
     with open(template_file) as template:
-        stack = heat.stacks.create(stack_name=stack_name, template=template.read(), parameters=rubbos_parameters)
+        stack = heat.stacks.create(
+            stack_name=stack_name,
+            template=template.read(),
+            parameters=rubbos_parameters)
 
     stack_id = stack['stack']['id']
     stack_status = heat.stacks.get(stack_id).stack_status
 
     print "Created stack, id=" + str(stack_id) + ", status=" + str(stack_status)
 
-    timeInProgress= 0
+    timeInProgress = 0
     while stack_status == "CREATE_IN_PROGRESS" and timeInProgress < 150:
-        print "  stack's status: %s, after %d seconds" %(stack_status, timeInProgress)
+        print "  stack's status: %s, after %d seconds" % (stack_status, timeInProgress)
         time.sleep(5)
         timeInProgress = timeInProgress + 5
         stack_status = heat.stacks.get(stack_id).stack_status
 
-    print "After %d seconds, the stack's status is [%s]" %(timeInProgress, stack_status)
+    print "After %d seconds, the stack's status is [%s]" % (timeInProgress, stack_status)
     return True if stack_status == "CREATE_COMPLETE" else False
 
+
 def get_instances(nova_client):
     try:
         instances = nova_client.servers.list(search_opts={'all_tenants': 1})
         return instances
-    except Exception, e:
+    except Exception as e:
         print "Error [get_instances(nova_client)]:", e
         return None
 
+
 def reboot_instances():
     print("========== reboot instances ==========")
     nova = _get_nova_client()
@@ -205,12 +245,13 @@ def reboot_instances():
             instance.reboot()
     print("Finish reboot all rubbos servers.")
 
+
 def rubbos_run():
     print "========== run rubbos ==========="
 
     nova = _get_nova_client()
     instances = get_instances(nova)
-    if instances == None:
+    if instances is None:
         print "Found *None* instances, exit rubbos_run()!"
         return False
 
@@ -223,57 +264,83 @@ def rubbos_run():
     database_servers = ""
     for instance in instances:
         name = getattr(instance, 'name')
-        private_ip = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
-        public_ip =  [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
+        private_ip = [
+            x['addr'] for x in getattr(
+                instance,
+                'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed']
+        public_ip = [
+            x['addr'] for x in getattr(
+                instance,
+                'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating']
 
         if name.find("rubbos-control") >= 0:
             control_public_ip = public_ip[0]
-            control_server = str(name) + ':' + public_ip[0] + ':' + private_ip[0]
+            control_server = str(name) + ':' + \
+                public_ip[0] + ':' + private_ip[0]
         if name.find("rubbos-client") >= 0:
-            client_servers = client_servers + str(name)+':'+private_ip[0] + ","
+            client_servers = client_servers + \
+                str(name) + ':' + private_ip[0] + ","
         if name.find("rubbos-httpd") >= 0:
-            web_servers = web_servers + str(name)+':'+private_ip[0] + ","
+            web_servers = web_servers + str(name) + ':' + private_ip[0] + ","
         if name.find("rubbos-tomcat") >= 0:
             app_servers = app_servers + str(name) + ':' + private_ip[0] + ","
         if name.find("rubbos-cjdbc") >= 0:
             cjdbc_controller = str(name) + ':' + private_ip[0]
         if name.find("rubbos-mysql") >= 0:
-            database_servers = database_servers + str(name) + ':' + private_ip[0] + ","
+            database_servers = database_servers + \
+                str(name) + ':' + private_ip[0] + ","
 
-    client_servers = client_servers[0:len(client_servers)-1]
-    web_servers = web_servers[0:len(web_servers)-1]
-    app_servers = app_servers[0:len(app_servers)-1]
-    database_servers = database_servers[0:len(database_servers)-1]
+    client_servers = client_servers[0:len(client_servers) - 1]
+    web_servers = web_servers[0:len(web_servers) - 1]
+    app_servers = app_servers[0:len(app_servers) - 1]
+    database_servers = database_servers[0:len(database_servers) - 1]
     print "control_server:    %s" % control_server
     print "client_servers:    %s" % client_servers
     print "web_servers:       %s" % web_servers
     print "app_servers:       %s" % app_servers
     print "cjdbc_controller:  %s" % cjdbc_controller
     print "database_servers:  %s" % database_servers
-    with open(Bottlenecks_repo_dir+"/testsuites/rubbos/puppet_manifests/internal/rubbos.conf") as temp_f, open('rubbos.conf', 'w') as new_f:
+    with open(Bottlenecks_repo_dir + "/testsuites/rubbos/puppet_manifests/internal/rubbos.conf") as temp_f, open('rubbos.conf', 'w') as new_f:
         for line in temp_f.readlines():
-            if line.find("REPLACED_CONTROLLER") >= 0 :
-                new_f.write( line.replace("REPLACED_CONTROLLER", control_server) )
+            if line.find("REPLACED_CONTROLLER") >= 0:
+                new_f.write(
+                    line.replace(
+                        "REPLACED_CONTROLLER",
+                        control_server))
             elif line.find("REPLACED_CLIENT_SERVERS") >= 0:
-                new_f.write( line.replace("REPLACED_CLIENT_SERVERS", client_servers) )
+                new_f.write(
+                    line.replace(
+                        "REPLACED_CLIENT_SERVERS",
+                        client_servers))
             elif line.find("REPLACED_WEB_SERVERS") >= 0:
-                new_f.write( line.replace("REPLACED_WEB_SERVERS", web_servers) )
+                new_f.write(line.replace("REPLACED_WEB_SERVERS", web_servers))
             elif line.find("REPLACED_APP_SERVERS") >= 0:
-                new_f.write( line.replace("REPLACED_APP_SERVERS", app_servers) )
+                new_f.write(line.replace("REPLACED_APP_SERVERS", app_servers))
             elif line.find("REPLACED_CJDBC_CONTROLLER") >= 0:
-                new_f.write( line.replace("REPLACED_CJDBC_CONTROLLER", cjdbc_controller) )
+                new_f.write(
+                    line.replace(
+                        "REPLACED_CJDBC_CONTROLLER",
+                        cjdbc_controller))
             elif line.find("REPLACED_DB_SERVERS") >= 0:
-                new_f.write( line.replace("REPLACED_DB_SERVERS", database_servers) )
+                new_f.write(
+                    line.replace(
+                        "REPLACED_DB_SERVERS",
+                        database_servers))
             elif line.find("REPLACED_CLIENTS_PER_NODE") >= 0:
-                new_f.write( line.replace("REPLACED_CLIENTS_PER_NODE", "200 400 800 1600 3200") )
+                new_f.write(
+                    line.replace(
+                        "REPLACED_CLIENTS_PER_NODE",
+                        "200 400 800 1600 3200"))
             else:
                 new_f.write(line)
     if os.path.exists("rubbos.conf") == False:
         return False
 
-    cmd = "sudo chmod 0600 " + Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key"
+    cmd = "sudo chmod 0600 " + Bottlenecks_repo_dir + \
+        "/utils/infra_setup/bottlenecks_key/bottlenecks_key"
     subprocess.call(cmd, shell=True)
-    ssh_args = "-o StrictHostKeyChecking=no -o BatchMode=yes -i " + Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key "
+    ssh_args = "-o StrictHostKeyChecking=no -o BatchMode=yes -i " + \
+        Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key "
 
     print "############### Test #################"
     cmd = 'ssh-keygen -f "/root/.ssh/known_hosts" -R ' + control_public_ip
@@ -292,19 +359,23 @@ def rubbos_run():
     subprocess.call("nova list", shell=True)
     print "############### Test #################"
 
-    cmd = "scp " + ssh_args + "rubbos.conf ubuntu@" + control_public_ip + ":/home/ubuntu/"
+    cmd = "scp " + ssh_args + "rubbos.conf ubuntu@" + \
+        control_public_ip + ":/home/ubuntu/"
     print "Exec shell: " + cmd
     subprocess.call(cmd, shell=True)
 
-    cmd = "scp " + ssh_args + Bottlenecks_repo_dir + "/testsuites/rubbos/puppet_manifests/internal/run_rubbos_internal.sh ubuntu@" + control_public_ip + ":/home/ubuntu/"
+    cmd = "scp " + ssh_args + Bottlenecks_repo_dir + \
+        "/testsuites/rubbos/puppet_manifests/internal/run_rubbos_internal.sh ubuntu@" + control_public_ip + ":/home/ubuntu/"
     print "Exec shell: " + cmd
     subprocess.call(cmd, shell=True)
 
     # call remote run_rubbos_internal.sh
-    cmd = "ssh " + ssh_args + " ubuntu@" + control_public_ip + ' "sudo /home/ubuntu/run_rubbos_internal.sh /home/ubuntu/rubbos.conf /home/ubuntu/btnks-results" '
+    cmd = "ssh " + ssh_args + " ubuntu@" + control_public_ip + \
+        ' "sudo /home/ubuntu/run_rubbos_internal.sh /home/ubuntu/rubbos.conf /home/ubuntu/btnks-results" '
     print "Exec shell: " + cmd
     subprocess.call(cmd, shell=True)
-    cmd = "scp " + ssh_args + " ubuntu@" + control_public_ip + ":/home/ubuntu/btnks-results/rubbos.out ./rubbos.out"
+    cmd = "scp " + ssh_args + " ubuntu@" + control_public_ip + \
+        ":/home/ubuntu/btnks-results/rubbos.out ./rubbos.out"
     print "Exec shell: " + cmd
     subprocess.call(cmd, shell=True)
     if os.path.exists("rubbos.out") == False:
@@ -318,36 +389,40 @@ def rubbos_run():
             print line
     return True
 
+
 def main():
     global Heat_template
     global Bottlenecks_repo_dir
     global image_url
-    Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"      # same in Dockerfile, docker directory
+    # same in Dockerfile, docker directory
+    Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"
 
     image_url = 'http://artifacts.opnfv.org/bottlenecks/rubbos/trusty-server-cloudimg-amd64-btnks.img'
     #image_url = 'http://artifacts.opnfv.org/bottlenecks/rubbos/bottlenecks-trusty-server.img'
 
     if not (args.conf):
-       logger.error("Configuration files are not set for testcase")
-       exit(-1)
+        logger.error("Configuration files are not set for testcase")
+        exit(-1)
     else:
-       Heat_template = args.conf
-
-    master_user_data=""
-    agent_user_data=""
-    with open(Bottlenecks_repo_dir+"/utils/infra_setup/user_data/p-master-user-data") as f:
-        master_user_data=f.read()
-    master_user_data = master_user_data.replace('REPLACED_PUPPET_MASTER_SERVER','rubbos-control')
-    with open(Bottlenecks_repo_dir+"/utils/infra_setup/user_data/p-agent-user-data") as f:
-        agent_user_data=f.read()
-    agent_user_data = agent_user_data.replace('REPLACED_PUPPET_MASTER_SERVER','rubbos-control')
-
-    parameters={'image': 'bottlenecks_rubbos_image',
-                'key_name': 'bottlenecks_rubbos_keypair',
-                'flavor': 'bottlenecks_rubbos_flavor',
-                'public_net': os.environ.get('EXTERNAL_NET'),
-                'master_user_data': master_user_data,
-                'agent_user_data': agent_user_data }
+        Heat_template = args.conf
+
+    master_user_data = ""
+    agent_user_data = ""
+    with open(Bottlenecks_repo_dir + "/utils/infra_setup/user_data/p-master-user-data") as f:
+        master_user_data = f.read()
+    master_user_data = master_user_data.replace(
+        'REPLACED_PUPPET_MASTER_SERVER', 'rubbos-control')
+    with open(Bottlenecks_repo_dir + "/utils/infra_setup/user_data/p-agent-user-data") as f:
+        agent_user_data = f.read()
+    agent_user_data = agent_user_data.replace(
+        'REPLACED_PUPPET_MASTER_SERVER', 'rubbos-control')
+
+    parameters = {'image': 'bottlenecks_rubbos_image',
+                  'key_name': 'bottlenecks_rubbos_keypair',
+                  'flavor': 'bottlenecks_rubbos_flavor',
+                  'public_net': os.environ.get('EXTERNAL_NET'),
+                  'master_user_data': master_user_data,
+                  'agent_user_data': agent_user_data}
 
     print "Heat_template_file: " + Heat_template
     print "parameters:\n" + str(parameters)
@@ -360,31 +435,35 @@ def main():
 
     dest_dir = "/tmp"
     image_file = _download_url(image_url, dest_dir)
-    if image_file == None:
-       print "error with downloading image(s)"
-       exit(-1)
+    if image_file is None:
+        print "error with downloading image(s)"
+        exit(-1)
 
     image_created = rubbos_create_images(imagefile=image_file)
-    keyPath = Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
+    keyPath = Bottlenecks_repo_dir + \
+        "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
     rubbos_create_keypairs(key_path=keyPath)
     rubbos_create_flavors()
 
-    if image_created == True:
-        stack_created = rubbos_create_instances(template_file=Heat_template, rubbos_parameters=parameters, stack_name="bottlenecks_rubbos_stack")
+    if image_created:
+        stack_created = rubbos_create_instances(
+            template_file=Heat_template,
+            rubbos_parameters=parameters,
+            stack_name="bottlenecks_rubbos_stack")
     else:
         print "Cannot create instances, as Failed to create image(s)."
-        exit (-1)
+        exit(-1)
 
     print "Wait 600 seconds after stack creation..."
     time.sleep(600)
 
-    #reboot_instances()
-    #time.sleep(180)
+    # reboot_instances()
+    # time.sleep(180)
 
     rubbos_run()
     time.sleep(30)
 
     rubbos_env_cleanup()
 
-if __name__=='__main__':
+if __name__ == '__main__':
     main()
index 1aed759..f0018e7 100755 (executable)
@@ -24,9 +24,11 @@ from novaclient.client import Client as NovaClient
 # parser for configuration files in each test case
 # ------------------------------------------------------
 parser = argparse.ArgumentParser()
-parser.add_argument("-c", "--conf",
-                    help="configuration files for the testcase, in yaml format",
-                    default="/home/opnfv/bottlenecks/testsuites/vstf/testcase_cfg/vstf_Tu1.yaml")
+parser.add_argument(
+    "-c",
+    "--conf",
+    help="configuration files for the testcase, in yaml format",
+    default="/home/opnfv/bottlenecks/testsuites/vstf/testcase_cfg/vstf_Tu1.yaml")
 args = parser.parse_args()
 
 #--------------------------------------------------
@@ -37,31 +39,40 @@ logger = logging.getLogger(__name__)
 
 def _get_keystone_client():
     keystone_client = KeystoneClient(
-                auth_url=os.environ.get('OS_AUTH_URL'),
-                username=os.environ.get('OS_USERNAME'),
-                password=os.environ.get('OS_PASSWORD'),
-                tenant_name=os.environ.get('OS_TENANT_NAME'),
-                cacert=os.environ.get('OS_CACERT'))
+        auth_url=os.environ.get('OS_AUTH_URL'),
+        username=os.environ.get('OS_USERNAME'),
+        password=os.environ.get('OS_PASSWORD'),
+        tenant_name=os.environ.get('OS_TENANT_NAME'),
+        cacert=os.environ.get('OS_CACERT'))
     return keystone_client
 
+
 def _get_heat_client():
     keystone = _get_keystone_client()
-    heat_endpoint = keystone.service_catalog.url_for(service_type='orchestration')
-    heat_client = HeatClient('1', endpoint=heat_endpoint, token=keystone.auth_token)
+    heat_endpoint = keystone.service_catalog.url_for(
+        service_type='orchestration')
+    heat_client = HeatClient(
+        '1',
+        endpoint=heat_endpoint,
+        token=keystone.auth_token)
     return heat_client
 
+
 def _get_glance_client():
     keystone = _get_keystone_client()
-    glance_endpoint = keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
+    glance_endpoint = keystone.service_catalog.url_for(
+        service_type='image', endpoint_type='publicURL')
     return GlanceClient(glance_endpoint, token=keystone.auth_token)
 
+
 def _get_nova_client():
     nova_client = NovaClient("2", os.environ.get('OS_USERNAME'),
-                                  os.environ.get('OS_PASSWORD'),
-                                  os.environ.get('OS_TENANT_NAME'),
-                                  os.environ.get('OS_AUTH_URL'))
+                             os.environ.get('OS_PASSWORD'),
+                             os.environ.get('OS_TENANT_NAME'),
+                             os.environ.get('OS_AUTH_URL'))
     return nova_client
 
+
 def _download_url(src_url, dest_dir):
     ''' Download a file to a destination path given a URL'''
     file_name = src_url.rsplit('/')[-1]
@@ -75,22 +86,27 @@ def _download_url(src_url, dest_dir):
         shutil.copyfileobj(response, f)
     return dest
 
-def vstf_stack_satisfy(name="bottlenecks_vstf_stack", status="CREATE_COMPLETE"):
+
+def vstf_stack_satisfy(
+        name="bottlenecks_vstf_stack",
+        status="CREATE_COMPLETE"):
     heat = _get_heat_client()
     for stack in heat.stacks.list():
-        if status == None and stack.stack_name == name:
+        if status is None and stack.stack_name == name:
             # Found target stack
             print "Found stack, name=" + str(stack.stack_name)
             return True
-        elif stack.stack_name == name and stack.stack_status==status:
+        elif stack.stack_name == name and stack.stack_status == status:
             print "Found stack, name=" + str(stack.stack_name) + ", status=" + str(stack.stack_status)
             return True
     return False
 
+
 def vstf_env_prepare(template=None):
     print "========== Prepare vstf environment =========="
     logger.info("env preparation for testcase.")
 
+
 def vstf_env_cleanup():
     print "========== Cleanup vstf environment =========="
     glance = _get_glance_client()
@@ -118,25 +134,31 @@ def vstf_env_cleanup():
             heat.stacks.delete(stack.id)
 
     timeInProgress = 0
-    while vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None) and timeInProgress < 60:
+    while vstf_stack_satisfy(
+            name="bottlenecks_vstf_stack",
+            status=None) and timeInProgress < 60:
         time.sleep(5)
         timeInProgress = timeInProgress + 5
 
-    if vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None) == True:
+    if vstf_stack_satisfy(name="bottlenecks_vstf_stack", status=None):
         print "Failed to clean the stack"
         return False
     else:
         return True
 
+
 def vstf_create_images(imagefile=None, image_name="bottlenecks_vstf_image"):
     print "========== Create vstf image in OS =========="
 
-    if imagefile == None:
-       print "imagefile not set/found"
-       return False
+    if imagefile is None:
+        print "imagefile not set/found"
+        return False
 
     glance = _get_glance_client()
-    image = glance.images.create(name=image_name, disk_format="qcow2", container_format="bare")
+    image = glance.images.create(
+        name=image_name,
+        disk_format="qcow2",
+        container_format="bare")
     with open(imagefile) as fimage:
         glance.images.upload(image.id, fimage)
 
@@ -148,50 +170,65 @@ def vstf_create_images(imagefile=None, image_name="bottlenecks_vstf_image"):
         timeInQueue = timeInQueue + 1
         img_status = glance.images.get(image.id).status
 
-    print "After %d seconds, the image's status is [%s]" %(timeInQueue, img_status)
+    print "After %d seconds, the image's status is [%s]" % (timeInQueue, img_status)
     return True if img_status == "active" else False
 
+
 def vstf_create_keypairs(key_path, name="bottlenecks_vstf_keypair"):
     print "========== Add vstf keypairs in OS =========="
     nova = _get_nova_client()
     with open(key_path) as pkey:
         nova.keypairs.create(name=name, public_key=pkey.read())
 
-def vstf_create_flavors(name="bottlenecks_vstf_flavor", ram=4096, vcpus=2, disk=10):
+
+def vstf_create_flavors(
+        name="bottlenecks_vstf_flavor",
+        ram=4096,
+        vcpus=2,
+        disk=10):
     print "========== Create vstf flavors in OS =========="
     nova = _get_nova_client()
     nova.flavors.create(name=name, ram=ram, vcpus=vcpus, disk=disk)
 
-def vstf_create_instances(template_file, vstf_parameters=None, stack_name="bottlenecks_vstf_stack"):
+
+def vstf_create_instances(
+        template_file,
+        vstf_parameters=None,
+        stack_name="bottlenecks_vstf_stack"):
     print "========== Create vstf instances =========="
     heat = _get_heat_client()
 
     with open(template_file) as template:
-        stack = heat.stacks.create(stack_name=stack_name, template=template.read(), parameters=vstf_parameters)
+        stack = heat.stacks.create(
+            stack_name=stack_name,
+            template=template.read(),
+            parameters=vstf_parameters)
 
     stack_id = stack['stack']['id']
     stack_status = heat.stacks.get(stack_id).stack_status
 
     print "Created stack, id=" + str(stack_id) + ", status=" + str(stack_status)
 
-    timeInProgress= 0
+    timeInProgress = 0
     while stack_status == "CREATE_IN_PROGRESS" and timeInProgress < 150:
-        print "  stack's status: %s, after %d seconds" %(stack_status, timeInProgress)
+        print "  stack's status: %s, after %d seconds" % (stack_status, timeInProgress)
         time.sleep(5)
         timeInProgress = timeInProgress + 5
         stack_status = heat.stacks.get(stack_id).stack_status
 
-    print "After %d seconds, the stack's status is [%s]" %(timeInProgress, stack_status)
+    print "After %d seconds, the stack's status is [%s]" % (timeInProgress, stack_status)
     return True if stack_status == "CREATE_COMPLETE" else False
 
+
 def get_instances(nova_client):
     try:
         instances = nova_client.servers.list(search_opts={'all_tenants': 1})
         return instances
-    except Exception, e:
+    except Exception as e:
         print "Error [get_instances(nova_client)]:", e
         return None
 
+
 def vstf_run(launch_file=None, test_file=None):
     print "================run vstf==============="
 
@@ -204,12 +241,12 @@ def vstf_run(launch_file=None, test_file=None):
     subprocess.call("nova list", shell=True)
     time.sleep(100)
     instances = get_instances(nova)
-    if instances == None:
+    if instances is None:
         print "Found *None* instances, exit vstf_run()!"
         return False
-    if launch_file == None or test_file == None:
-         print "Error, vstf launch/test file not given"
-         return False
+    if launch_file is None or test_file is None:
+        print "Error, vstf launch/test file not given"
+        return False
     cmd = "bash " + launch_file
     subprocess.call(cmd, shell=True)
     time.sleep(50)
@@ -217,66 +254,80 @@ def vstf_run(launch_file=None, test_file=None):
     subprocess.call(cmd, shell=True)
     time.sleep(20)
 
+
 def main():
 
-    Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"      # same in Dockerfile, docker directory
-    Heat_template = Bottlenecks_repo_dir + "/testsuites/vstf/testcase_cfg/vstf_heat_template.yaml"
+    # same in Dockerfile, docker directory
+    Bottlenecks_repo_dir = "/home/opnfv/bottlenecks"
+    Heat_template = Bottlenecks_repo_dir + \
+        "/testsuites/vstf/testcase_cfg/vstf_heat_template.yaml"
     manager_image_url = 'http://artifacts.opnfv.org/bottlenecks/vstf-manager-new.img'
     agent_image_url = 'http://artifacts.opnfv.org/bottlenecks/vstf-agent-new.img'
 
-    #vstf_env_prepare(testcase_cfg)
+    # vstf_env_prepare(testcase_cfg)
     vstf_env_cleanup()
 
     dest_dir = "/tmp"
     manager_file = _download_url(manager_image_url, dest_dir)
-    if manager_file == None:
-       print "error with downloading image(s)"
-       exit(-1)
+    if manager_file is None:
+        print "error with downloading image(s)"
+        exit(-1)
     agent_file = _download_url(agent_image_url, dest_dir)
-    if agent_file == None:
-       print "error with downloading image(s)"
-       exit(-1)
+    if agent_file is None:
+        print "error with downloading image(s)"
+        exit(-1)
 
-    #TO DO:the parameters are all used defaults here, it should be changed depends on what it is really named
-    parameters={'key_name': 'bottlenecks_vstf_keypair',
-                'flavor': 'bottlenecks_vstf_flavor',
-                'public_net': os.environ.get('EXTERNAL_NET')}
+    # TO DO:the parameters are all used defaults here, it should be changed
+    # depends on what it is really named
+    parameters = {'key_name': 'bottlenecks_vstf_keypair',
+                  'flavor': 'bottlenecks_vstf_flavor',
+                  'public_net': os.environ.get('EXTERNAL_NET')}
 
     print "Heat_template_file: " + Heat_template
     print "parameters:\n" + str(parameters)
 
     if not (args.conf):
-       logger.error("Configuration files are not set for testcase")
-       exit(-1)
+        logger.error("Configuration files are not set for testcase")
+        exit(-1)
     else:
-       testcase_cfg = args.conf
+        testcase_cfg = args.conf
 
     manager_image_created = False
     tester_image_created = False
     target_image_created = False
     stack_created = False
 
-    manager_image_created = vstf_create_images(imagefile=manager_file, image_name="bottlenecks_vstf_manager")
-    tester_image_created = vstf_create_images(imagefile=agent_file, image_name="bottlenecks_vstf_tester")
-    target_image_created = vstf_create_images(imagefile=agent_file, image_name="bottlenecks_vstf_target")
-    keyPath = Bottlenecks_repo_dir + "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
+    manager_image_created = vstf_create_images(
+        imagefile=manager_file,
+        image_name="bottlenecks_vstf_manager")
+    tester_image_created = vstf_create_images(
+        imagefile=agent_file, image_name="bottlenecks_vstf_tester")
+    target_image_created = vstf_create_images(
+        imagefile=agent_file, image_name="bottlenecks_vstf_target")
+    keyPath = Bottlenecks_repo_dir + \
+        "/utils/infra_setup/bottlenecks_key/bottlenecks_key.pub"
     vstf_create_keypairs(key_path=keyPath)
     vstf_create_flavors()
 
-    if manager_image_created == True and tester_image_created == True and target_image_created == True:
-        stack_created = vstf_create_instances(template_file=Heat_template, vstf_parameters=parameters, stack_name="bottlenecks_vstf_stack")
+    if manager_image_created and tester_image_created and target_image_created:
+        stack_created = vstf_create_instances(
+            template_file=Heat_template,
+            vstf_parameters=parameters,
+            stack_name="bottlenecks_vstf_stack")
     else:
         print "Cannot create instances, as Failed to create image(s)."
-        exit (-1)
+        exit(-1)
 
     print "Wait 300 seconds after stack creation..."
     time.sleep(300)
 
-    launchfile = Bottlenecks_repo_dir + "/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh"
-    testfile = Bottlenecks_repo_dir + "/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh"
+    launchfile = Bottlenecks_repo_dir + \
+        "/utils/infra_setup/heat_template/vstf_heat_template/launch_vstf.sh"
+    testfile = Bottlenecks_repo_dir + \
+        "/utils/infra_setup/heat_template/vstf_heat_template/vstf_test.sh"
     vstf_run(launch_file=launchfile, test_file=testfile)
 
     vstf_env_cleanup()
 
-if __name__=='__main__':
+if __name__ == '__main__':
     main()
index 7206e32..af11bc6 100755 (executable)
@@ -14,7 +14,9 @@ import logging
 \r
 LOG = logging.getLogger(__name__)\r
 \r
+\r
 class Uploader(object):\r
+\r
     def __init__(self, conf):\r
         self.headers = {'Content-type': 'application/json'}\r
         self.timeout = 5\r
@@ -36,12 +38,18 @@ class Uploader(object):
         self.result["case_name"] = case_name\r
         self.result["details"] = raw_data\r
         try:\r
-            LOG.debug('Result to be uploaded:\n %s' % json.dumps(self.result, indent=4))\r
+            LOG.debug(\r
+                'Result to be uploaded:\n %s' %\r
+                json.dumps(\r
+                    self.result,\r
+                    indent=4))\r
             res = requests.post(self.target,\r
                                 data=json.dumps(self.result),\r
                                 headers=self.headers,\r
                                 timeout=self.timeout)\r
-            print('Test result posting finished with status code %d.' % res.status_code)\r
+            print(\r
+                'Test result posting finished with status code %d.' %\r
+                res.status_code)\r
         except Exception as err:\r
             LOG.error('Failed to record result data: %s', err)\r
 \r
@@ -49,8 +57,14 @@ class Uploader(object):
 if __name__ == "__main__":\r
     logging.basicConfig(level=logging.DEBUG)\r
     parser = argparse.ArgumentParser()\r
-    parser.add_argument('--config', required=True, help="basic config file for uploader, json format.")\r
-    parser.add_argument('--dir', required=True, help="result files for test cases")\r
+    parser.add_argument(\r
+        '--config',\r
+        required=True,\r
+        help="basic config file for uploader, json format.")\r
+    parser.add_argument(\r
+        '--dir',\r
+        required=True,\r
+        help="result files for test cases")\r
     args = parser.parse_args()\r
     realpath = os.path.realpath(args.dir)\r
     for filename in os.listdir(args.dir):\r
@@ -58,4 +72,9 @@ if __name__ == "__main__":
         LOG.debug("uploading test result from file:%s", filepath)\r
         with open(filepath) as stream:\r
             result = eval(stream.read())\r
-            Uploader(args.config).upload_result(filename.lower().replace('-', ''), result)\r
+            Uploader(\r
+                args.config).upload_result(\r
+                filename.lower().replace(\r
+                    '-',\r
+                    ''),\r
+                result)\r
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index b574599..4d92c51 100644 (file)
@@ -40,17 +40,17 @@ stc_opts = [
 class Client(daemon.Daemon):
     """This is a consumer of vstf-agent which will create two channel to the
     rabbitmq-server, one for direct call, one for fan call.
-    
+
     agent start with a config file which record rabbitmq's ip, port and user passwd
     also each agent has its own id.
-    
+
     """
 
     def __init__(self, agent, config_file):
         """Record the config file, init the daemon.
-        
+
         :param str config_file: the config of a VSTF agent.
-        
+
         """
         super(Client, self).__init__('/tmp/esp_rpc_client.pid')
         self.config_file = config_file
@@ -61,7 +61,7 @@ class Client(daemon.Daemon):
 
     def init_config(self):
         """Use olso.config to analyse the config file
-        
+
         """
         parser = CfgParser(self.config_file)
         parser.register_my_opts(server_opts, "rabbit")
@@ -80,7 +80,7 @@ class Client(daemon.Daemon):
 
     def run(self):
         """Run the rabbitmq consumers as a daemon.
-        
+
         """
         signal.signal(signal.SIGTERM, self.process_exit)
         self.loop_thread()
@@ -90,7 +90,7 @@ class Client(daemon.Daemon):
         """This function try to stop the agent after running agent stop.
         When we call vstf-agent stop which will send a signal SIGTERM to agent
         When the agent catch the SIGTERM signal will call this function.
-        
+
         """
         LOG.info("daemon catch the signalterm, start to stop the process.")
         self.run_flag = False
@@ -104,7 +104,7 @@ class Client(daemon.Daemon):
     def stop_agent(self):
         """Notice that: this function just kill the agent by pid file, it has
         none vars of the agent.
-        
+
         """
         LOG.info("call daemon stop.")
         # kill the main thread
@@ -120,9 +120,11 @@ def main():
                         default="soft",
                         choices=["soft", "spirent"],
                         help="the agent type, as now, just soft and spirent")
-    parser.add_argument('--config_file', action='store',
-                        default="/etc/vstf/amqp/amqp.ini",
-                        help="some env_build params recorded in the config file")
+    parser.add_argument(
+        '--config_file',
+        action='store',
+        default="/etc/vstf/amqp/amqp.ini",
+        help="some env_build params recorded in the config file")
 
     args = parser.parse_args()
 
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 126a7d5..1d39d7b 100644 (file)
@@ -31,11 +31,11 @@ class Collect(object):
         """the base _system info
         {'os info':{'_system':'ubuntu', 'kernel': '3.13.3'}}"""
         return {const.OS_INFO:
-            {
-                '_system': open('/etc/issue.net').readline().strip(),
-                'kernel': platform.uname()[2]
-            }
-        }
+                {
+                    '_system': open('/etc/issue.net').readline().strip(),
+                    'kernel': platform.uname()[2]
+                }
+                }
 
     def _memery(self):
         """ Return the information in /proc/meminfo
@@ -46,11 +46,11 @@ class Collect(object):
                 meminfo[line.split(':')[0]] = line.split(':')[1].strip()
 
         return {const.MEMORY_INFO:
-            {
-                "Mem Total": meminfo['MemTotal'],
-                "Mem Swap": meminfo['SwapTotal']
-            }
-        }
+                {
+                    "Mem Total": meminfo['MemTotal'],
+                    "Mem Swap": meminfo['SwapTotal']
+                }
+                }
 
     def _lscpu(self):
         ret = {}
@@ -68,18 +68,19 @@ class Collect(object):
                     ret.append(cpuinfo)
                     cpuinfo = OrderedDict()
                 elif len(line.split(':')) == 2:
-                    cpuinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
+                    cpuinfo[line.split(':')[0].strip()] = line.split(':')[
+                        1].strip()
                 else:
                     log.error("_cpu info unknow format <%(c)s>", {'c': line})
         return {const.CPU_INFO:
-            dict(
-                {
-                    "Model Name": ret[0]['model name'],
-                    "Address sizes": ret[0]['address sizes']
-                },
-                **(self._lscpu())
-            )
-        }
+                dict(
+                    {
+                        "Model Name": ret[0]['model name'],
+                        "Address sizes": ret[0]['address sizes']
+                    },
+                    **(self._lscpu())
+                )
+                }
 
     def _hw_sysinfo(self):
         cmdline = "dmidecode | grep  -A 2 'System Information' | grep -v 'System Information'"
@@ -90,14 +91,15 @@ class Collect(object):
             for tmp in output.strip().split('\n'):
                 if tmp is None or tmp is "":
                     continue
-                # split the items 
+                # split the items
                 tmp = tmp.split(":")
                 if len(tmp) >= 2:
                     # first item as key, and the other as value
                     result[tmp[0].strip("\t")] = ";".join(tmp[1:])
             return {const.HW_INFO: result}
         else:
-            return {const.HW_INFO: "get hw info failed. check the host by cmd: dmidecode"}
+            return {
+                const.HW_INFO: "get hw info failed. check the host by cmd: dmidecode"}
 
     def collect_host_info(self):
         return [self._system, self._cpu, self._memery(), self._hw_sysinfo()]
index e4df9b2..29dd2c0 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class CommandLine(object):
+
     def __init__(self):
         super(CommandLine, self).__init__()
         self.proc = None
index 8b5387f..c34f5e0 100644 (file)
@@ -21,6 +21,7 @@ default_drivers = {
 
 
 class LspciHelper(object):
+
     def __init__(self):
         self.bdf_desc_map = {}
         self.bdf_device_map = {}
@@ -45,7 +46,8 @@ class LspciHelper(object):
         for bdf, desc in self.bdf_desc_map.items():
             device = get_device_name(bdf)
             if device is None:
-                LOG.info("cann't find device name for bdf:%s, no driver is available.", bdf)
+                LOG.info(
+                    "cann't find device name for bdf:%s, no driver is available.", bdf)
                 try:
                     self._load_driver(desc)
                 except:
@@ -66,13 +68,17 @@ class LspciHelper(object):
     def _get_ip_macs(self):
         for device, bdf in self.device_bdf_map.items():
             buf = check_output("ip addr show dev %s" % device, shell=True)
-            macs = re.compile("[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+            macs = re.compile(
+                "[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+                re.IGNORECASE | re.MULTILINE)
             for mac in macs.findall(buf):
                 if mac.lower() in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
                     continue
                 else:
                     break
-            ips = re.compile(r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}/\d{1,2})", re.MULTILINE)
+            ips = re.compile(
+                r"inet (\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}/\d{1,2})",
+                re.MULTILINE)
             ip = ips.findall(buf)
             if ip:
                 self.bdf_ip_map[bdf] = ip[0]
@@ -93,6 +99,7 @@ class LspciHelper(object):
 
 
 class DeviceManager(object):
+
     def __init__(self):
         super(DeviceManager, self).__init__()
         mgr = netns.NetnsManager()
index c3b5c6b..4bae49d 100644 (file)
@@ -19,6 +19,7 @@ class _ImageManager(object):
     A qemu-img wrapper to create qcow2 child image from a parent image.
 
     """
+
     def __init__(self, parent_image_path, child_image_dir):
         """
         :param parent_image_path    str: the parent image path.
@@ -31,7 +32,11 @@ class _ImageManager(object):
         assert os.path.isfile(self.parent_image_path)
         assert os.path.isdir(self.child_image_dir)
 
-    def create_child_image(self, child_name, full_clone=False, image_type='qcow2'):
+    def create_child_image(
+            self,
+            child_name,
+            full_clone=False,
+            image_type='qcow2'):
         """
         create a child image and put it in self.child_image_dir.
 
@@ -39,16 +44,25 @@ class _ImageManager(object):
         :return: return the path of child image.
         """
 
-        image_path = os.path.join(self.child_image_dir, child_name) + '.' + image_type
+        image_path = os.path.join(
+            self.child_image_dir,
+            child_name) + '.' + image_type
         if full_clone:
-            cmd = self._convert_str % {'image_type': image_type, 'child_path': image_path, 'parent_path': self.parent_image_path}
+            cmd = self._convert_str % {
+                'image_type': image_type,
+                'child_path': image_path,
+                'parent_path': self.parent_image_path}
         else:
-            cmd = self._create_child_str % {'child_path': image_path, 'parent_path': self.parent_image_path, 'image_type':image_type}
+            cmd = self._create_child_str % {
+                'child_path': image_path,
+                'parent_path': self.parent_image_path,
+                'image_type': image_type}
         check_call(cmd.split())
         return image_path
 
 
 class ImageManager(object):
+
     def __init__(self, cfg):
         """
         ImageManager creates images from configuration context.
@@ -74,13 +88,22 @@ class ImageManager(object):
 
     @staticmethod
     def _check_cfg(cfg):
-        for key in ('parent_image', 'dst_location', 'full_clone', 'type', 'names'):
+        for key in (
+            'parent_image',
+            'dst_location',
+            'full_clone',
+            'type',
+                'names'):
             if key not in cfg:
                 raise Exception("does't find %s config" % key)
         if cfg['type'] not in ('raw', 'qcow2'):
-            raise Exception("type:%s not supported, only support 'raw' and 'qcow2'" % cfg['type'])
+            raise Exception(
+                "type:%s not supported, only support 'raw' and 'qcow2'" %
+                cfg['type'])
         if not cfg['full_clone'] and cfg['type'] == 'raw':
-            raise Exception("only support 'qcow2' for not full_clone image creation" % cfg['type'])
+            raise Exception(
+                "only support 'qcow2' for not full_clone image creation" %
+                cfg['type'])
         return cfg
 
     def create_all(self):
@@ -90,7 +113,8 @@ class ImageManager(object):
         :return: True for success, False for failure.
         """
         for name in self.names:
-            image = self.mgr.create_child_image(name, self.full_clone, self.image_type)
+            image = self.mgr.create_child_image(
+                name, self.full_clone, self.image_type)
             LOG.info("image: %s created", image)
         return True
 
@@ -101,7 +125,8 @@ class ImageManager(object):
         :return: True for success. Raise exception otherwise.
         """
         for name in self.names:
-            image_path = os.path.join(self.image_dir, name + '.' + self.image_type)
+            image_path = os.path.join(
+                self.image_dir, name + '.' + self.image_type)
             try:
                 os.unlink(image_path)
                 LOG.info("remove:%s successfully", image_path)
@@ -114,7 +139,12 @@ if __name__ == '__main__':
     import argparse
     import json
     parser = argparse.ArgumentParser()
-    parser.add_argument('action', choices = ('create','clean'), help='action:create|clean')
+    parser.add_argument(
+        'action',
+        choices=(
+            'create',
+            'clean'),
+        help='action:create|clean')
     parser.add_argument('--config', help='config file to parse')
     args = parser.parse_args()
     logging.basicConfig(level=logging.INFO)
@@ -124,5 +154,3 @@ if __name__ == '__main__':
         mgr.create_all()
     if args.action == 'clean':
         mgr.clean_all()
-
-
index 6edd14c..5aca536 100644 (file)
@@ -27,6 +27,7 @@ def my_chdir(file_path):
 
 
 class SourceCodeManager(object):
+
     def __init__(self):
         super(SourceCodeManager, self).__init__()
         self.base_path = '/opt/vstf/'
index 7364f8b..4b7b31b 100644 (file)
@@ -69,7 +69,9 @@ class VMConfigBy9pfs(object):
         return ret == constant.VM_CMD_EXCUTE_SUCCES_FLAG_CONTENT
 
     def _wait_command_done(self):
-        done = self._wait_flag_file_to_exist(constant.VM_CMD_DONE_FLAG_FILE, constant.VM_COMMON_CMD_EXCUTE_TIME_OUT)
+        done = self._wait_flag_file_to_exist(
+            constant.VM_CMD_DONE_FLAG_FILE,
+            constant.VM_COMMON_CMD_EXCUTE_TIME_OUT)
         if done:
             return self._get_cmd_return_code()
         else:
@@ -86,7 +88,8 @@ class VMConfigBy9pfs(object):
             raise Exception("9pfs command failure: timeout.")
 
     def wait_up(self):
-        return self._wait_flag_file_to_exist(constant.VM_UP_Flag_FILE, constant.VM_UP_TIME_OUT)
+        return self._wait_flag_file_to_exist(
+            constant.VM_UP_Flag_FILE, constant.VM_UP_TIME_OUT)
 
     def config_ip(self, mac, ip):
         cmd = 'config_ip %s %s' % (mac, ip)
@@ -118,7 +121,13 @@ class VMConfigBy9pfs(object):
         cmd = 'recover_nic_binding ' + mac_str
         return self._set_cmd(cmd)
 
-    def config_amqp(self, identity, server, port=5672, user="guest", passwd="guest"):
+    def config_amqp(
+            self,
+            identity,
+            server,
+            port=5672,
+            user="guest",
+            passwd="guest"):
         data = {
             'server': server,
             'port': port,
@@ -135,7 +144,7 @@ class VMConfigBy9pfs(object):
         id=%(id)s''' % data
         file_name = "amqp.ini"
         dedented_text = textwrap.dedent(content)
-        self._write(file_name, header+dedented_text)
+        self._write(file_name, header + dedented_text)
         cmd = 'config_amqp %s' % file_name
         return self._set_cmd(cmd)
 
index 60a3b37..d0a2060 100644 (file)
@@ -93,8 +93,14 @@ class VMControlOperation(object):
 
     @staticmethod
     def check_required_options(context):
-        for key in ('vm_name', 'vm_memory', 'vm_cpu', 'image_path', 'image_type', 'taps'):
-            if not context.has_key(key):
+        for key in (
+            'vm_name',
+            'vm_memory',
+            'vm_cpu',
+            'image_path',
+            'image_type',
+                'taps'):
+            if key not in context:
                 raise Exception("vm config error, must set %s option" % key)
 
     def set_vm_defaults(self, context):
@@ -117,14 +123,18 @@ class VMControlOperation(object):
             context.setdefault(k, v)
 
     def _shutdown_vm(self):
-        out = check_output("virsh list | sed 1,2d | awk '{print $2}'", shell=True)
+        out = check_output(
+            "virsh list | sed 1,2d | awk '{print $2}'",
+            shell=True)
         vm_set = set(out.split())
         for vm in vm_set:
             check_call("virsh shutdown %s" % vm, shell=True)
         timeout = 60
         # wait for gracefully shutdown
         while timeout > 0:
-            out = check_output("virsh list | sed 1,2d | awk '{print $2}'", shell=True)
+            out = check_output(
+                "virsh list | sed 1,2d | awk '{print $2}'",
+                shell=True)
             vm_set = set(out.split())
             if len(vm_set) == 0:
                 break
@@ -135,7 +145,9 @@ class VMControlOperation(object):
         for vm in vm_set:
             check_call("virsh destroy %s" % vm, shell=True)
         # undefine all
-        out = check_output("virsh list --all | sed 1,2d | awk '{print $2}'", shell=True)
+        out = check_output(
+            "virsh list --all | sed 1,2d | awk '{print $2}'",
+            shell=True)
         vm_set = set(out.split())
         for vm in vm_set:
             check_call("virsh undefine %s" % vm, shell=True)
@@ -177,7 +189,8 @@ class VMControlOperation(object):
         vm9pctrl = self.vm_9p_controllers[vm_name]
         ret = vm9pctrl.wait_up()
         if ret not in (True,):
-            raise Exception('vm running but stuck in boot process, please manully check.')
+            raise Exception(
+                'vm running but stuck in boot process, please manully check.')
         LOG.debug('waitVM %s up ok, ret:%s', vm_name, ret)
         return True
 
@@ -193,12 +206,14 @@ class VMControlOperation(object):
         # print self.vm_9p_controllers
         init_cfg = vm_cfg['init_config']
         if "ctrl_ip_setting" in init_cfg:
-            ret = vm9pctrl.config_ip(vm_cfg['ctrl_mac'], init_cfg['ctrl_ip_setting'])
-            assert ret == True
+            ret = vm9pctrl.config_ip(
+                vm_cfg['ctrl_mac'],
+                init_cfg['ctrl_ip_setting'])
+            assert ret
             LOG.info('initConfigVM config ip ok')
         if 'ctrl_gw' in init_cfg:
             ret = vm9pctrl.config_gw(init_cfg['ctrl_gw'])
-            assert ret == True
+            assert ret
             LOG.info('initConfigVM ctrl_gw ok')
         if "ctrl_ip_setting" in init_cfg and "amqp_server" in init_cfg:
             identity = init_cfg['ctrl_ip_setting'].split('/')[0]
@@ -209,7 +224,7 @@ class VMControlOperation(object):
             user = init_cfg['amqp_user']
             passwd = init_cfg['amqp_passwd']
             ret = vm9pctrl.config_amqp(identity, server, port, user, passwd)
-            assert ret == True
+            assert ret
             LOG.info('initConfigVM config_amqp ok')
         if 'tap_pktloop_config' in init_cfg:
             taps = vm_cfg['taps']
@@ -217,6 +232,6 @@ class VMControlOperation(object):
             for tap in taps:
                 macs.append(tap['tap_mac'])
             ret = vm9pctrl.set_pktloop_dpdk(macs)
-            assert ret == True
+            assert ret
             LOG.info('initConfigVM set_pktloop_dpdk ok')
         return True
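
[Two Python 2 idioms are also rewritten in the hunks above: `context.has_key(key)` becomes `key in context` (flake8 W601; dict.has_key() no longer exists in Python 3), and `assert ret == True` becomes plain `assert ret` (E712 discourages equality comparison with True/False). An illustrative snippet, not taken from the patch:]

    # Hypothetical vm config dict, for illustration only.
    context = {'vm_name': 'vm1', 'vm_cpu': 2}

    # Old spellings flagged by flake8:
    #     context.has_key('vm_name')
    #     assert ret == True
    # Preferred spellings, valid on both Python 2 and 3:
    ret = 'vm_name' in context
    assert ret
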
index 6f9131e..89c1096 100644 (file)
@@ -38,7 +38,7 @@ xml_disk = '''
       <source file='IMAGE_PATH'/>
       <target dev='vda' bus='virtio'/>
     </disk>'''
-    
+
 xml_ctrl_br = '''
 <interface type='bridge'>
   <mac address='CTRL_MAC'/>
@@ -63,7 +63,7 @@ xml_br = '''
       <model type='virtio'/>
       <target dev='TAP_NAME'/>
     </interface>'''
-    
+
 xml_pci = '''
     <hostdev mode='subsystem' type='pci' managed='yes'>
       <driver name='kvm'/>
@@ -82,4 +82,3 @@ xml_tail = '''
     </graphics>
   </devices>
 </domain>'''
-
index a66a887..19bf12f 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class PluginManager(object):
+
     def __init__(self):
         self.instance = None
         self.saved = {}
index fc9802b..83b8d15 100644 (file)
@@ -5,4 +5,4 @@
 # are made available under the terms of the Apache License, Version 2.0
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
\ No newline at end of file
+##############################################################################
index 6f89565..e20b5dd 100644 (file)
@@ -11,9 +11,11 @@ import stevedore
 
 
 class DriverPluginManager(object):
+
     def __init__(self):
         self.plugins = {}
-        self.mgr = stevedore.extension.ExtensionManager(namespace="drivers.plugins", invoke_on_load=True)
+        self.mgr = stevedore.extension.ExtensionManager(
+            namespace="drivers.plugins", invoke_on_load=True)
 
     def load(self, drivers):
         plugin = self.determine_driver_type(drivers)
index ddc0744..807143f 100644 (file)
@@ -22,16 +22,16 @@ class DriverPlugin:
     @abstractmethod
     def clean(self):
         """implement this clean function to clean environment before and after calling any other functions.
-        
+
         """
         pass
 
     @abstractmethod
     def load(self, drivers):
         """load driver modules.
-        
+
         :param list    drivers:list of modules to be inserted. for example:[ixgbe,vhost_net]
-        
+
         """
         pass
 
index bf3c15c..2004b8e 100644 (file)
@@ -24,7 +24,7 @@ class OriginDriverPlugin(model.DriverPlugin):
 
     def clean(self):
         """clean drivers list in self.origin_drivers.
-        
+
         """
         for mod in self.origin_drivers:
             check_and_rmmod(mod)
@@ -34,7 +34,7 @@ class OriginDriverPlugin(model.DriverPlugin):
 
     def load(self, drivers):
         """insmod drivers
-        
+
         :param list    drivers:list of drivers link ['ixgbe','vhost_net']
         """
         # load implicit 'tun' module dependency for vhost_net
index e655936..53cddeb 100644 (file)
@@ -23,6 +23,7 @@ LOG = logging.getLogger('__name__')
 
 
 class VMOperation(object):
+
     def __init__(self):
         self.RTE_SDK = '/home/dpdk-2.0.0'
         self.RTE_TARGET = 'x86_64-native-linuxapp-gcc'
@@ -46,7 +47,8 @@ class VMOperation(object):
         for mac in tap_macs:
             bdf = self.ip_helper.mac_bdf_map[mac]
             bdf_str = bdf_str + ' ' + bdf
-        cmd = 'python %s/tools/dpdk_nic_bind.py --bind=virtio-pci %s' % (self.RTE_SDK, bdf_str)
+        cmd = 'python %s/tools/dpdk_nic_bind.py --bind=virtio-pci %s' % (
+            self.RTE_SDK, bdf_str)
         LOG.debug("recover_nic_binding runs cmd = %s", cmd)
         check_call(cmd, shell=True)
 
@@ -60,15 +62,22 @@ class VMOperation(object):
         check_call("mount -t hugetlbfs nodev /mnt/huge", shell=True)
         check_call("modprobe uio", shell=True)
         check_and_rmmod('igb_uio')
-        check_call("insmod %s/%s/kmod/igb_uio.ko" % (RTE_SDK, RTE_TARGET), shell=True)
+        check_call(
+            "insmod %s/%s/kmod/igb_uio.ko" %
+            (RTE_SDK, RTE_TARGET), shell=True)
 
         bdf_str = ''
         for mac in tap_macs:
             bdf = self.ip_helper.mac_bdf_map[mac]
             bdf_str = bdf_str + ' ' + bdf
 
-        check_call('python %s/tools/dpdk_nic_bind.py --bind=igb_uio %s' % (RTE_SDK, bdf_str), shell=True)
-        cpu_num = int(check_output('cat /proc/cpuinfo | grep processor | wc -l', shell=True))
+        check_call(
+            'python %s/tools/dpdk_nic_bind.py --bind=igb_uio %s' %
+            (RTE_SDK, bdf_str), shell=True)
+        cpu_num = int(
+            check_output(
+                'cat /proc/cpuinfo | grep processor | wc -l',
+                shell=True))
         cpu_bit_mask = 0
         i = cpu_num
         while i:
@@ -76,14 +85,7 @@ class VMOperation(object):
             i -= 1
         cpu_bit_mask = hex(cpu_bit_mask)
         cmd = "%s/%s/app/testpmd -c %s -n %d -- --disable-hw-vlan --disable-rss --nb-cores=%d --rxq=%d --txq=%d --rxd=4096 --txd=4096" % (
-            RTE_SDK,
-            RTE_TARGET,
-            cpu_bit_mask,
-            cpu_num / 2,
-            cpu_num - 1,
-            (cpu_num - 1) / 2,
-            (cpu_num - 1) / 2
-        )
+            RTE_SDK, RTE_TARGET, cpu_bit_mask, cpu_num / 2, cpu_num - 1, (cpu_num - 1) / 2, (cpu_num - 1) / 2)
         LOG.info("set_pktloop_dpdk runs cmd = %s", cmd)
         p = subprocess.Popen(cmd.split())
         if not p.poll():
@@ -105,6 +107,7 @@ class VMOperation(object):
 
 
 class FSMonitor(object):
+
     def __init__(self, pidfile=None, interval=1):
         if pidfile:
             self.pidfile = pidfile
@@ -121,8 +124,9 @@ class FSMonitor(object):
             pass
 
     def kill_old(self):
-        out = check_output("ps -ef | grep -v grep | egrep 'python.*%s' | awk '{print $2}'" % sys.argv[0],
-                                      shell=True)
+        out = check_output(
+            "ps -ef | grep -v grep | egrep 'python.*%s' | awk '{print $2}'" %
+            sys.argv[0], shell=True)
         if out:
             for pid in out.split():
                 if int(pid) != os.getpid():
@@ -131,7 +135,8 @@ class FSMonitor(object):
 
     def set_fail(self, failed_reason):
         with open(constant.VM_CMD_RETURN_CODE_FILE, 'w') as f:
-            f.writelines([constant.VM_CMD_EXCUTE_FAILED_FLAG_CONTENT, '\n', failed_reason])
+            f.writelines(
+                [constant.VM_CMD_EXCUTE_FAILED_FLAG_CONTENT, '\n', failed_reason])
         with open(constant.VM_CMD_DONE_FLAG_FILE, 'w') as f:
             pass
 
@@ -149,8 +154,10 @@ class FSMonitor(object):
             pid = os.fork()
             if pid > 0:
                 sys.exit(0)
-        except OSError, e:
-            sys.stderr.write('fork #1 failed:%d,(%s)\n' % (e.errno, e.strerror))
+        except OSError as e:
+            sys.stderr.write(
+                'fork #1 failed:%d,(%s)\n' %
+                (e.errno, e.strerror))
             sys.exit(1)
         os.setsid()
         os.umask(0)
@@ -158,10 +165,17 @@ class FSMonitor(object):
             pid = os.fork()
             if pid > 0:
                 sys.exit(0)
-        except OSError, e:
-            sys.stderr.write('fork #2 failed:%d,(%s)\n' % (e.errno, e.strerror))
+        except OSError as e:
+            sys.stderr.write(
+                'fork #2 failed:%d,(%s)\n' %
+                (e.errno, e.strerror))
             sys.exit(1)
-        LOG.debug("pid:%d,ppid:%d,sid:%d", os.getpid(), os.getppid(), os.getsid(os.getpid()))
+        LOG.debug(
+            "pid:%d,ppid:%d,sid:%d",
+            os.getpid(),
+            os.getppid(),
+            os.getsid(
+                os.getpid()))
         old = open('/dev/null', 'r')
         os.dup2(old.fileno(), sys.stdin.fileno())
         old = open('/dev/null', 'a+')
@@ -192,8 +206,9 @@ class FSMonitor(object):
                             method(*param)
                             self.set_success()
                             LOG.debug("cmd sucessfully done")
-                        except Exception, e:
-                            LOG.debug('failed to run:%s %s,reason:%s', cmd, param, str(e))
+                        except Exception as e:
+                            LOG.debug(
+                                'failed to run:%s %s,reason:%s', cmd, param, str(e))
                             self.set_fail(str(e))
                         break
                 else:
@@ -209,7 +224,8 @@ if __name__ == '__main__':
     # echo "config_ip 56:6f:44:a5:3f:a2 192.168.188.200/23" > command;touch command_set
     # echo "config_gw 192.168.188.1" > command;touch command_set
     # echo set_pktloop_dpdk 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 > command;touch command_set
-    # echo recover_nic_binding 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 > command;touch command_set
+    # echo recover_nic_binding 56:6f:44:a5:3f:a2 56:6f:44:a5:3f:a3 >
+    # command;touch command_set
     import os
     logging.basicConfig(level=logging.DEBUG, filename=LOG_FILE, filemode='w')
     os.environ['PATH'] = os.environ["PATH"] + ":/usr/local/bin"
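
[The FSMonitor.py hunks above also replace the Python 2-only `except OSError, e:` syntax with `except OSError as e:`, the only form Python 3 accepts; the surrounding double-fork daemonize logic is unchanged. A reduced sketch of that construct using the modern syntax, mirroring but not reproducing the patched code:]

    import os
    import sys

    def fork_once():
        # First fork of the classic double-fork daemonize sequence.
        try:
            pid = os.fork()
            if pid > 0:
                sys.exit(0)      # parent exits, the child carries on
        except OSError as e:     # was: except OSError, e:
            sys.stderr.write('fork failed: %d (%s)\n' % (e.errno, e.strerror))
            sys.exit(1)
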
index 33b37eb..3ae80a3 100644 (file)
@@ -18,4 +18,4 @@ VM_CMD_EXCUTE_FAILED_FLAG_CONTENT = 'fail'
 VM_CMD_NOT_FOUND = 'comamnd_not_found'
 VM_UP_TIME_OUT = 120
 VM_COMMON_CMD_EXCUTE_TIME_OUT = 10
-FS_MOUNT_POINT = '/mnt/9pfs'
\ No newline at end of file
+FS_MOUNT_POINT = '/mnt/9pfs'
index 5bdb415..c28b6ec 100644 (file)
@@ -70,6 +70,7 @@ def umount(path):
 
 
 class IPCommandHelper(object):
+
     def __init__(self):
         self.devices = []
         self.macs = []
@@ -80,7 +81,9 @@ class IPCommandHelper(object):
         self.mac_bdf_map = {}
         self.bdf_mac_map = {}
         buf = check_output("ip link", shell=True)
-        macs = re.compile("[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+        macs = re.compile(
+            "[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+            re.IGNORECASE | re.MULTILINE)
         for mac in macs.findall(buf):
             if mac.lower() in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
                 continue
@@ -95,7 +98,10 @@ class IPCommandHelper(object):
             self.mac_device_map[mac] = device
         for device in self.devices:
             buf = check_output("ethtool -i %s" % device, shell=True)
-            bdfs = re.findall(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', buf, re.MULTILINE)
+            bdfs = re.findall(
+                r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$',
+                buf,
+                re.MULTILINE)
             if bdfs:
                 self.bdf_device_map[bdfs[0]] = device
                 self.device_bdf_map[device] = bdfs[0]
index 27af806..2fd7d69 100644 (file)
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Plugin(EnvBuilderPlugin):
+
     def __init__(self):
         super(Plugin, self).__init__()
         self.vm_mgr = VMControlOperation()
index 0682aac..0c994d4 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Plugin(EnvBuilderPlugin):
+
     def __init__(self):
         super(Plugin, self).__init__()
         self.dr_mgr = DriverPluginManager()
index 21b8f82..fb6a54c 100644 (file)
@@ -12,6 +12,7 @@ from vstf.common.utils import check_call, get_eth_by_bdf, check_output
 
 
 class BridgePlugin(model.VswitchPlugin):
+
     def __init__(self):
         pass
 
@@ -19,7 +20,9 @@ class BridgePlugin(model.VswitchPlugin):
         """clean brs created before.
 
         """
-        out = check_output(r"brctl show | grep -v '^\s' | awk '{print $1}'|sed '1,1d'", shell=True)
+        out = check_output(
+            r"brctl show | grep -v '^\s' | awk '{print $1}'|sed '1,1d'",
+            shell=True)
         print out
         for br in out.split():
             if br != 'br0':
index 785a1db..4890ee1 100644 (file)
@@ -11,9 +11,11 @@ import stevedore
 
 
 class VswitchPluginManager(object):
+
     def __init__(self):
         self.plugin = None
-        self.mgr = stevedore.extension.ExtensionManager(namespace="vswitch.plugins", invoke_on_load=True)
+        self.mgr = stevedore.extension.ExtensionManager(
+            namespace="vswitch.plugins", invoke_on_load=True)
 
     def clean(self):
         if self.plugin:
index 5d70041..8a80e44 100644 (file)
@@ -64,4 +64,4 @@ class VswitchPlugin:
         pass
 
     def set_fastlink(self, br_cfg):
-        return True
\ No newline at end of file
+        return True
index 7ea56d4..66943c1 100644 (file)
@@ -28,7 +28,8 @@ class OvsPlugin(model.VswitchPlugin):
         self.dirs = {'db': "/usr/local/etc/openvswitch"}
         self.cmds = []
         self.cmds.append("mkdir -p /usr/local/etc/openvswitch")
-        self.cmds.append("ovsdb-tool create /usr/local/etc/openvswitch/conf.db")
+        self.cmds.append(
+            "ovsdb-tool create /usr/local/etc/openvswitch/conf.db")
         self.cmds.append("ovsdb-server --remote=punix:/usr/local/var/run/openvswitch/db.sock \
              --remote=db:Open_vSwitch,Open_vSwitch,manager_options \
              --private-key=db:Open_vSwitch,SSL,private_key \
@@ -81,8 +82,9 @@ class OvsPlugin(model.VswitchPlugin):
         name, uplinks = br_cfg['name'], br_cfg['uplinks']
 
         check_call("ovs-vsctl add-br %s" % (name), shell=True)
-        if br_cfg['vtep']: # vxlan supports
-            local_ip, remote_ip = br_cfg['vtep']['local_ip'], br_cfg['vtep']['remote_ip']
+        if br_cfg['vtep']:  # vxlan supports
+            local_ip, remote_ip = br_cfg['vtep'][
+                'local_ip'], br_cfg['vtep']['remote_ip']
             assert len(uplinks) == 1
             uplink = uplinks[0]
             device = get_eth_by_bdf(uplink['bdf'])
@@ -90,7 +92,9 @@ class OvsPlugin(model.VswitchPlugin):
             vtep = 'vx1'
             check_call("ifconfig %s %s up" % (device, local_ip), shell=True)
             check_call("ovs-vsctl add-port %s %s" % (name, vtep), shell=True)
-            check_call("ovs-vsctl set interface %s type=vxlan options:remote_ip=%s" % (vtep, remote_ip), shell=True)
+            check_call(
+                "ovs-vsctl set interface %s type=vxlan options:remote_ip=%s" %
+                (vtep, remote_ip), shell=True)
         for uplink in uplinks:
             device = get_eth_by_bdf(uplink['bdf'])
             vlan_mode = uplink['vlan_mode']
@@ -99,9 +103,13 @@ class OvsPlugin(model.VswitchPlugin):
             call("ethtool -A %s rx off tx off " % device, shell=True)
             check_call("ovs-vsctl add-port %s %s" % (name, device), shell=True)
             if vlan_mode == 'trunk':
-                check_call("ovs-vsctl set port %s trunks=%s" % (device, vlan_id), shell=True)
+                check_call(
+                    "ovs-vsctl set port %s trunks=%s" %
+                    (device, vlan_id), shell=True)
             elif vlan_mode == 'access':
-                check_call("ovs-vsctl set port %s tag=%s" % (device, vlan_id), shell=True)
+                check_call(
+                    "ovs-vsctl set port %s tag=%s" %
+                    (device, vlan_id), shell=True)
             else:
                 raise Exception("unreconized vlan_mode:%s" % vlan_mode)
         return True
@@ -118,7 +126,8 @@ class OvsPlugin(model.VswitchPlugin):
                         }
 
         """
-        port, vlan_mode, vlan = tap_cfg['tap_name'], tap_cfg['vlan_mode'], tap_cfg['vlan_id']
+        port, vlan_mode, vlan = tap_cfg['tap_name'], tap_cfg[
+            'vlan_mode'], tap_cfg['vlan_id']
         assert vlan_mode in ('access', 'vxlan')
         if int(vlan) > '4095':
             # vxlan setting
@@ -162,15 +171,21 @@ class OvsPlugin(model.VswitchPlugin):
         if vlan_mode == 'vxlan':
             raise Exception("don't support vxlan setting right now.")
         elif vlan_mode == 'trunk':
-            check_call("ovs-vsctl set port %s trunks=%s" % (port, vlan_id), shell=True)
+            check_call(
+                "ovs-vsctl set port %s trunks=%s" %
+                (port, vlan_id), shell=True)
         else:
-            check_call("ovs-vsctl set port %s tag=%s" % (port, vlan_id), shell=True)
+            check_call(
+                "ovs-vsctl set port %s tag=%s" %
+                (port, vlan_id), shell=True)
 
     def __fastlink(self, br, p1, p2):
         LOG.info("_fastlink(%s,%s,%s)", br, p1, p2)
         p1 = p1.replace(' ', '')
         p2 = p2.replace(' ', '')
-        bdfs = check_output("lspci |grep Eth | awk '{print $1}'", shell=True).splitlines()
+        bdfs = check_output(
+            "lspci |grep Eth | awk '{print $1}'",
+            shell=True).splitlines()
         if p1 in bdfs:
             p1 = get_eth_by_bdf(p1)
         if p2 in bdfs:
@@ -182,6 +197,10 @@ class OvsPlugin(model.VswitchPlugin):
             port_num, interface = s.replace('(', ' ').replace(')', ' ').split()
             ovs_port[interface] = port_num
         pn1, pn2 = ovs_port[p1], ovs_port[p2]
-        check_call("ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" % (br, pn1, pn2), shell=True)
-        check_call("ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" % (br, pn2, pn1), shell=True)
+        check_call(
+            "ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" %
+            (br, pn1, pn2), shell=True)
+        check_call(
+            "ovs-ofctl add-flow %s in_port=%s,priority=100,action=output:%s" %
+            (br, pn2, pn1), shell=True)
         return True
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 30e1de1..2fd20db 100644 (file)
@@ -24,6 +24,7 @@ def run_cmd(cmd, shell=True):
 
 
 class Resource(object):
+
     def __init__(self):
         super(Resource, self).__init__()
         self.sysfs = "/sys/devices/system/node"
@@ -35,16 +36,18 @@ class Resource(object):
             for process_index in xrange(0, len(bin(process_mapping)) - 2):
                 if process_mapping & 1 << process_index != 0:
                     core = self._get_core_id(node, process_index)
-                    if not self.mapping[node].has_key(core):
+                    if core not in self.mapping[node]:
                         self.mapping[node][core] = []
                     self.mapping[node][core].append(process_index)
 
     def _get_process_mapping(self, numa_node):
-        ret = run_cmd("cat " + self.sysfs + '/' + numa_node + '/cpumap').replace(',', '').lstrip('0')
+        ret = run_cmd("cat " + self.sysfs + '/' + numa_node +
+                      '/cpumap').replace(',', '').lstrip('0')
         return int(ret, 16)
 
     def _get_core_id(self, numa_node, process_index):
-        cmd = "cat " + self.sysfs + '/' + numa_node + '/cpu' + str(process_index) + '/topology/core_id'
+        cmd = "cat " + self.sysfs + '/' + numa_node + \
+            '/cpu' + str(process_index) + '/topology/core_id'
         return run_cmd(cmd).strip('\n')
 
     def _init_numa(self):
@@ -63,6 +66,7 @@ class Resource(object):
 
 
 class Equalizer(Resource):
+
     def __init__(self):
         super(Equalizer, self).__init__()
 
index 0c92f97..8a01dfc 100644 (file)
@@ -16,6 +16,7 @@ except ImportError:
 
 
 class GetPhyInfo(object):
+
     def __init__(self):
         pass
 
@@ -46,7 +47,9 @@ class GetPhyInfo(object):
         numa = {}
         num = self._get_numa_num()
         for numa_id in range(0, int(num)):
-            flag, temp = commands.getstatusoutput('lscpu | grep "NUMA node%s"' % (str(numa_id)))
+            flag, temp = commands.getstatusoutput(
+                'lscpu | grep "NUMA node%s"' %
+                (str(numa_id)))
             try:
                 temp = temp.split(':')[1].split()[0]
             except:
@@ -58,7 +61,9 @@ class GetPhyInfo(object):
     def get_nic_numa(self, nic):
         result = {}
         try:
-            flag, id = commands.getstatusoutput('cat /sys/class/net/%s/device/numa_node' % (nic))
+            flag, id = commands.getstatusoutput(
+                'cat /sys/class/net/%s/device/numa_node' %
+                (nic))
         except:
             print('get nic numa id failed.')
         return id
@@ -102,7 +107,9 @@ class GetPhyInfo(object):
 
         # get vhost info
         proc_name = 'vhost-' + _main_pid
-        flag, temp = commands.getstatusoutput('ps -ef | grep %s | grep -v grep' % (proc_name))
+        flag, temp = commands.getstatusoutput(
+            'ps -ef | grep %s | grep -v grep' %
+            (proc_name))
         for line in temp.split('\n'):
             try:
                 vhost = line.split()[1]
@@ -134,7 +141,8 @@ class GetPhyInfo(object):
 
     def _get_proc_by_irq(self, irq):
         try:
-            flag, info = commands.getstatusoutput('ps -ef | grep irq/%s | grep -v grep ' % (irq))
+            flag, info = commands.getstatusoutput(
+                'ps -ef | grep irq/%s | grep -v grep ' % (irq))
             proc_id = info.split('\n')[0].split()[1]
         except:
             print("[ERROR]grep process id failed.")
@@ -142,7 +150,8 @@ class GetPhyInfo(object):
 
     def get_nic_interrupt_proc(self, nic):
         _phy_nic_thread = []
-        flag, info = commands.getstatusoutput('cat /proc/interrupts | grep %s' % (nic))
+        flag, info = commands.getstatusoutput(
+            'cat /proc/interrupts | grep %s' % (nic))
         for line in info.split('\n'):
             try:
                 irq_num = line.split(':')[0].split()[0]
index 5a09900..4579c50 100644 (file)
@@ -15,17 +15,24 @@ import re
 # pdb.set_trace()
 
 class Optimize(object):
+
     def __init__(self):
         pass
 
     def bind_cpu(self, cpu_range, thread):
-        flag, num = commands.getstatusoutput('taskset -pc %s %s' % (cpu_range, thread))
+        flag, num = commands.getstatusoutput(
+            'taskset -pc %s %s' %
+            (cpu_range, thread))
         return flag
 
     def catch_thread_info(self):
-        thread_info = {'fwd_vhost': None, 'src_recv_irq': None, 'dst_send_irq': None}
+        thread_info = {
+            'fwd_vhost': None,
+            'src_recv_irq': None,
+            'dst_send_irq': None}
         # top -H get the usage info
-        flag, threads_usages = commands.getstatusoutput('top -bH -n1 -c -w 2000')
+        flag, threads_usages = commands.getstatusoutput(
+            'top -bH -n1 -c -w 2000')
         line_array = threads_usages.split('\n')
         # get highest vhost line
         for line in line_array:
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 5b20363..316cbab 100644 (file)
@@ -18,4 +18,3 @@ def affctl_load(policy):
 def affctl_list():
     cmd = "affctl list"
     return check_output(cmd, shell=True)
-
index 3105be4..8eca165 100644 (file)
@@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Iperf(object):
+
     def __init__(self):
         self._send_processes = []
         self._receive_processes = []
@@ -40,7 +41,10 @@ class Iperf(object):
         cmd = self.format_send_start(**kwargs)
         LOG.debug("cmd:%s", cmd)
 
-        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = subprocess.Popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(1)
         ret = process.poll()
         if ret is None:
@@ -90,7 +94,10 @@ class Iperf(object):
         cmd = self.format_receive_start(**kwargs)
         LOG.debug("cmd:%s", cmd)
 
-        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = subprocess.Popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(1)
         ret = process.poll()
         if ret is None:
@@ -151,5 +158,8 @@ def unit_test():
 if __name__ == "__main__":
     from vstf.common.log import setup_logging
 
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf-iperf.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf-iperf.log",
+        clevel=logging.DEBUG)
     unit_test()
index 88a2544..bd9cc97 100644 (file)
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Netmap(object):
+
     def __init__(self):
         self._send_processes = []
         self._receive_processes = []
@@ -33,7 +34,10 @@ class Netmap(object):
         cmd = self.format_send_start(**kwargs)
         LOG.info("cmd:%s", cmd)
 
-        process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         self._send_processes.append(process)
         time.sleep(0.5)
 
@@ -89,7 +93,10 @@ class Netmap(object):
         cmd = self.format_receive_start(**kwargs)
         LOG.info("cmd:%s", cmd)
 
-        process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         self._receive_processes.append(process)
         time.sleep(0.5)
 
@@ -164,5 +171,8 @@ def unit_test():
 if __name__ == "__main__":
     from vstf.common.log import setup_logging
 
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-netmap.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-netmap.log",
+        clevel=logging.INFO)
     unit_test()
index c3b7386..9aaaf58 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Netns(object):
+
     def __init__(self):
         super(Netns, self).__init__()
         self.netns_add_str = "ip netns add %s"
@@ -74,13 +75,14 @@ class Netns(object):
 
 
 class NetnsManager(object):
+
     def __init__(self):
         super(NetnsManager, self).__init__()
         self._netns = Netns()
 
     def config_dev(self, netdev):
-        ns, device, ip = netdev["namespace"], netdev["iface"], netdev['ip_setting'] if "ip_setting" in netdev else \
-            netdev['ip']
+        ns, device, ip = netdev["namespace"], netdev["iface"], netdev[
+            'ip_setting'] if "ip_setting" in netdev else netdev['ip']
         self._netns.create_namespace(ns)
         self._netns.add_device(ns, device)
         self._netns.config_ip(ns, device, ip)
index 99f1c90..dac7d64 100644 (file)
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Netperf(object):
+
     def __init__(self):
         self._send_processes = []
         self._islat = False
@@ -48,7 +49,10 @@ class Netperf(object):
         LOG.info("cmd:%s", cmd)
 
         for _ in range(threads):
-            process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            process = my_popen(
+                cmd.split(),
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE)
             self._send_processes.append(process)
         time.sleep(0.5)
         for process in self._send_processes:
@@ -119,7 +123,10 @@ class Netperf(object):
         cmd = self.format_receive_start(**kwargs)
         LOG.info("cmd:%s", cmd)
 
-        process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(0.5)
         ret = process.poll()
         if ret:
@@ -177,5 +184,8 @@ def unit_test():
 if __name__ == "__main__":
     from vstf.common.log import setup_logging
 
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-netperf.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-netperf.log",
+        clevel=logging.DEBUG)
     unit_test()
index 671e1aa..9aff0a0 100644 (file)
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Pktgen(object):
+
     def __init__(self):
         utils.modprobe_pktgen()
         self._send_processes = []
@@ -33,7 +34,11 @@ class Pktgen(object):
 
     def _start(self):
         cmd = 'echo start > /proc/net/pktgen/pgctrl'
-        process = my_popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd,
+            shell=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         LOG.info('running pid:%s', process.pid)
         time.sleep(0.5)
         ret = process.poll()
@@ -42,7 +47,8 @@ class Pktgen(object):
             self._send_processes.append(process)
             error_str = "start pktgen send success"
         else:
-            error_str = "start pktgen send failed, stdout:%s,stderr:%s" % (process.stdout.read(), process.stderr.read())
+            error_str = "start pktgen send failed, stdout:%s,stderr:%s" % (
+                process.stdout.read(), process.stderr.read())
             LOG.info(error_str)
         return ret, error_str
 
@@ -149,5 +155,8 @@ def unit_test():
 if __name__ == "__main__":
     from vstf.common.log import setup_logging
 
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-pktgen.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-pktgen.log",
+        clevel=logging.DEBUG)
     unit_test()
index afdf44d..25272d8 100644 (file)
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Qperf(object):
+
     def __init__(self):
         self._send_processes = []
         self._receive_processes = []
@@ -30,7 +31,10 @@ class Qperf(object):
     def send_start(self, **kwargs):
         cmd = self.format_send_start(**kwargs)
         LOG.info("cmd:%s", cmd)
-        process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(0.5)
         ret = process.poll()
         if ret is None:
@@ -76,7 +80,10 @@ class Qperf(object):
         cmd = self.format_receive_start(**kwargs)
         LOG.info("cmd:%s", cmd)
 
-        process = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        process = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(0.5)
         ret = process.poll()
         if ret is None:
@@ -163,5 +170,8 @@ def unit_test():
 if __name__ == "__main__":
     from vstf.common.log import setup_logging
 
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-qperf.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-qperf.log",
+        clevel=logging.DEBUG)
     unit_test()
index 0231d5c..72d0082 100644 (file)
@@ -20,13 +20,17 @@ LOG = logging.getLogger(__name__)
 
 
 class Sar(object):
+
     def __init__(self):
         self.sar_cmd_str = "sar -u %(interval)s"
         self.child_process = {}
 
     def start(self, interval=2):
         cmd = self.sar_cmd_str % {'interval': interval}
-        child = my_popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        child = my_popen(
+            cmd.split(),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
         time.sleep(1)
         if child.poll() is not None:
             print child.poll()
@@ -55,7 +59,9 @@ class Sar(object):
         data = {}
         for h, d in zip(head, average):
             data[h.strip('%')] = float(d)
-        cpu_num = check_output('cat /proc/cpuinfo  | grep processor | wc -l', shell=True).strip()
+        cpu_num = check_output(
+            'cat /proc/cpuinfo  | grep processor | wc -l',
+            shell=True).strip()
         data.update({'cpu_num': int(cpu_num)})
         return data
 
index 4f7ddb6..f9ca46c 100644 (file)
@@ -26,7 +26,9 @@ def get_pid_by_name(process_name):
 
 
 def get_cpu_num():
-    cpu_num = check_output('cat /proc/cpuinfo  | grep processor | wc -l', shell=True).strip()
+    cpu_num = check_output(
+        'cat /proc/cpuinfo  | grep processor | wc -l',
+        shell=True).strip()
     cpu_num = int(cpu_num)
     return cpu_num
 
index b12ac1a..49e4f0c 100644 (file)
@@ -19,6 +19,7 @@ LOG = logging.getLogger(__name__)
 
 
 class VnStat(object):
+
     def __init__(self):
         self.netns_exec_str = "ip netns exec %s "
         self.vnstat_cmd_str = "vnstat -l -i %s"
@@ -63,7 +64,9 @@ class VnStat(object):
         m = {}
 
         digits = re.compile(r"\d+\.?\d*")
-        units = re.compile("(?:gib|mib|kib|kbit/s|gbits/s|mbit/s|p/s)", re.IGNORECASE | re.MULTILINE)
+        units = re.compile(
+            "(?:gib|mib|kib|kbit/s|gbits/s|mbit/s|p/s)",
+            re.IGNORECASE | re.MULTILINE)
         units_arr = units.findall(buf)
 
         LOG.debug(units_arr)
index 8be3c4e..939b12e 100644 (file)
@@ -40,6 +40,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Vstfperf(object):
+
     def __init__(self):
         for tool in cst.TOOLS:
             obj_name = 'vstf_' + tool
@@ -75,7 +76,10 @@ class Vstfperf(object):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-vstfperf.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-vstfperf.log",
+        clevel=logging.INFO)
 
     perf = Vstfperf()
     start = {
index 6271a09..9ba1e12 100644 (file)
@@ -25,6 +25,7 @@ LOG = logging.getLogger(__name__)
 
 
 class ENV(object):
+
     def __init__(self):
         super(ENV, self).__init__()
         self.builder = builder.PluginManager()
@@ -45,6 +46,7 @@ class ENV(object):
 
 
 class Drivers(object):
+
     def __init__(self):
         super(Drivers, self).__init__()
         self.dr_mgr = DriverPluginManager()
@@ -69,6 +71,7 @@ class Drivers(object):
 
 
 class Cpu(object):
+
     def affctl_load(self, policy):
         return affctl.affctl_load(policy)
 
@@ -77,12 +80,13 @@ class Cpu(object):
 
 
 class Perf(object):
+
     def __init__(self):
         super(Perf, self).__init__()
         self._vnstat = vnstat.VnStat()
         self._vstfperf = vstfperf.Vstfperf()
         self._sar = sar.Sar()
-   
+
     def run_vnstat(self, device, namespace=None):
         return self._vnstat.run_vnstat(device, namespace)
 
@@ -92,7 +96,7 @@ class Perf(object):
     def perf_run(self, **kwargs):
         return self._vstfperf.run(**kwargs)
 
-    def run_cpuwatch(self, interval = 2):
+    def run_cpuwatch(self, interval=2):
         return self._sar.start(interval)
 
     def kill_cpuwatch(self, pid):
@@ -106,6 +110,7 @@ class Perf(object):
 
 
 class EqualizerOps(GetPhyInfo, Optimize):
+
     def __init__(self):
         super(EqualizerOps, self).__init__()
 
@@ -115,19 +120,20 @@ class BaseAgent(coll.Collect,
                 Cpu,
                 Drivers,
                 DeviceManager,
-                commandline.CommandLine, 
+                commandline.CommandLine,
                 netns.NetnsManager,
                 SourceCodeManager
                 ):
+
     def __init__(self):
         super(BaseAgent, self).__init__()
 
 
 class softAgent(BaseAgent, Perf, EqualizerOps):
+
     def __init__(self):
         super(softAgent, self).__init__()
 
 
 if __name__ == '__main__':
     softAgent()
-
index 904de73..ff2af1f 100644 (file)
@@ -14,23 +14,26 @@ import Tkinter
 def build_cmd(*args):
     cmd = ''
     for arg in args:
-        cmd = cmd+str(arg)+' '
+        cmd = cmd + str(arg) + ' '
     #import pdb
-    #pdb.set_trace()
+    # pdb.set_trace()
     return cmd
 
 
 class stcPython():
+
     def __init__(self):
         self.tclsh = Tkinter.Tcl()
         self.stcpkg = '/home/Spirent_TestCenter_4.46/Spirent_TestCenter_Application_Linux'
-        self.tclsh.eval("set auto_path [ linsert $auto_path 0 %s ]" %(self.stcpkg))
+        self.tclsh.eval(
+            "set auto_path [ linsert $auto_path 0 %s ]" %
+            (self.stcpkg))
         self.tclsh.eval("package require SpirentTestCenter")
 
     def build_cmd(self, *args):
         cmd = ''
         for arg in args:
-            cmd = cmd+str(arg)+' '
+            cmd = cmd + str(arg) + ' '
         return cmd
 
     # [ stc base interface ]
@@ -38,206 +41,294 @@ class stcPython():
         cmd = build_cmd('stc::init', *args)
         return self.tclsh.eval(cmd)
     # stc connect
-    def stc_connect(self,*args):
+
+    def stc_connect(self, *args):
         cmd = build_cmd('stc::connect', *args)
         return self.tclsh.eval(cmd)
     # stc disconnect
-    def stc_disconnect(self,*args):
+
+    def stc_disconnect(self, *args):
         cmd = build_cmd('stc::disconnect', *args)
         return self.tclsh.eval(cmd)
     # stc create
-    def stc_create(self,*args):
+
+    def stc_create(self, *args):
         cmd = build_cmd('stc::create', *args)
         return self.tclsh.eval(cmd)
     # stc delete
-    def stc_delete(self,*args):
+
+    def stc_delete(self, *args):
         cmd = build_cmd('stc::delete', *args)
         return self.tclsh.eval(cmd)
     # stc config
-    def stc_config(self,*args):
+
+    def stc_config(self, *args):
         cmd = build_cmd('stc::config', *args)
         return self.tclsh.eval(cmd)
     # stc get
-    def stc_get(self,*args):
+
+    def stc_get(self, *args):
         cmd = build_cmd('stc::get', *args)
         return self.tclsh.eval(cmd)
     # stc apply
-    def stc_apply(self,*args):
+
+    def stc_apply(self, *args):
         cmd = build_cmd('stc::apply', *args)
         return self.tclsh.eval(cmd)
     # stc perform
-    def stc_perform(self,*args):
+
+    def stc_perform(self, *args):
         cmd = build_cmd('stc::perform', *args)
         return self.tclsh.eval(cmd)
     # stc reserve
-    def stc_reserve(self,*args):
+
+    def stc_reserve(self, *args):
         cmd = build_cmd('stc::reserve', *args)
         return self.tclsh.eval(cmd)
     # stc release
-    def stc_release(self,*args):
+
+    def stc_release(self, *args):
         cmd = build_cmd('stc::release', *args)
         return self.tclsh.eval(cmd)
     # stc subscribe
-    def stc_subscribe(self,*args):
-        cmd = build_cmd('stc::subscribe',*args)
+
+    def stc_subscribe(self, *args):
+        cmd = build_cmd('stc::subscribe', *args)
         return self.tclsh.eval(cmd)
     # stc unsubscribe
-    def stc_unsubscribe(self,*args):
+
+    def stc_unsubscribe(self, *args):
         cmd = build_cmd('stc::unsubscribe', *args)
         return self.tclsh.eval(cmd)
     # stc wait until sequencer complete
-    def stc_waituntilcomplete(self,*args):
+
+    def stc_waituntilcomplete(self, *args):
         cmd = build_cmd('stc::waituntilcomplete', *args)
         return self.tclsh.eval(cmd)
     # stc help
+
     def stc_help(self, *args):
-        cmd = build_cmd('stc::help',*args)
+        cmd = build_cmd('stc::help', *args)
         return self.tclsh.eval(cmd)
 
     # [ stc expand interface ]
     # get one dict-key's value
     # return value
-    def stc_get_value(self,stc_dict,stc_key):
-        cmd = stc_dict+' -'+stc_key
+    def stc_get_value(self, stc_dict, stc_key):
+        cmd = stc_dict + ' -' + stc_key
         return self.stc_get(cmd)
     # create project
     # return: project_name
+
     def stc_create_project(self):
         return self.stc_create('project')
     # create port under project
     # return: port name
-    def stc_create_port(self,project_name):
-        cmd = 'port -under '+project_name
+
+    def stc_create_port(self, project_name):
+        cmd = 'port -under ' + project_name
         return self.stc_create(cmd)
     # config port location
     # return: None
-    def stc_config_port_location(self,port_name,chassisAddress,slot,port):
+
+    def stc_config_port_location(self, port_name, chassisAddress, slot, port):
         #import pdb
-        #pdb.set_trace()
-        cmd = port_name+' -location //'+chassisAddress+'/'+slot+'/'+port+' -UseDefaultHost False'
+        # pdb.set_trace()
+        cmd = port_name + ' -location //' + chassisAddress + \
+            '/' + slot + '/' + port + ' -UseDefaultHost False'
         return self.stc_config(cmd)
     # create streamblock under port
     # return: streamblock name
-    def stc_create_streamblock(self,port_name,vlan_tag,ExpectedRxPort,srcMac,dstMac,sourceAddr,destAddr):
+
+    def stc_create_streamblock(
+            self,
+            port_name,
+            vlan_tag,
+            ExpectedRxPort,
+            srcMac,
+            dstMac,
+            sourceAddr,
+            destAddr):
         #import pdb
-        #pdb.set_trace()
-        if vlan_tag == None or vlan_tag == 'None':
+        # pdb.set_trace()
+        if vlan_tag is None or vlan_tag == 'None':
             frameStruc = '"EthernetII IPv4 Udp"'
             frameStruc = '"EthernetII IPv4 Udp"'
-            if ExpectedRxPort == '' :
-                return self.stc_create( 'streamBlock -under ',port_name,
-                                        '-frameConfig ',frameStruc,
-                                        '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
-                                        'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
-            else :
-                return self.stc_create( 'streamBlock -under ',port_name,
-                                        '-ExpectedRxPort',ExpectedRxPort,
-                                        '-frameConfig ',frameStruc,
-                                        '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
-                                        'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
-        else :
+            if ExpectedRxPort == '':
+                return self.stc_create(
+                    'streamBlock -under ',
+                    port_name,
+                    '-frameConfig ',
+                    frameStruc,
+                    '-frame "EthernetII.srcMac',
+                    srcMac,
+                    'EthernetII.dstMac',
+                    dstMac,
+                    'IPv4.1.sourceAddr',
+                    sourceAddr,
+                    'IPv4.1.destAddr',
+                    destAddr,
+                    '"')
+            else:
+                return self.stc_create(
+                    'streamBlock -under ',
+                    port_name,
+                    '-ExpectedRxPort',
+                    ExpectedRxPort,
+                    '-frameConfig ',
+                    frameStruc,
+                    '-frame "EthernetII.srcMac',
+                    srcMac,
+                    'EthernetII.dstMac',
+                    dstMac,
+                    'IPv4.1.sourceAddr',
+                    sourceAddr,
+                    'IPv4.1.destAddr',
+                    destAddr,
+                    '"')
+        else:
             frameStruc = '"EthernetII Vlan IPv4 Udp"'
             frameStruc = '"EthernetII Vlan IPv4 Udp"'
-            if ExpectedRxPort == '' :
-                return self.stc_create( 'streamBlock -under ',port_name,
-                                        '-frameConfig '+frameStruc,
-                                        '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
-                                        'Vlan.1.id',vlan_tag,
-                                        'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
-            else :
-                return self.stc_create( 'streamBlock -under ',port_name,
-                                        '-ExpectedRxPort',ExpectedRxPort,
-                                        '-frameConfig '+frameStruc,
-                                        '-frame "EthernetII.srcMac',srcMac,'EthernetII.dstMac',dstMac,
-                                        'Vlan.1.id',vlan_tag,
-                                        'IPv4.1.sourceAddr',sourceAddr,'IPv4.1.destAddr',destAddr,'"')
+            if ExpectedRxPort == '':
+                return self.stc_create(
+                    'streamBlock -under ',
+                    port_name,
+                    '-frameConfig ' +
+                    frameStruc,
+                    '-frame "EthernetII.srcMac',
+                    srcMac,
+                    'EthernetII.dstMac',
+                    dstMac,
+                    'Vlan.1.id',
+                    vlan_tag,
+                    'IPv4.1.sourceAddr',
+                    sourceAddr,
+                    'IPv4.1.destAddr',
+                    destAddr,
+                    '"')
+            else:
+                return self.stc_create(
+                    'streamBlock -under ',
+                    port_name,
+                    '-ExpectedRxPort',
+                    ExpectedRxPort,
+                    '-frameConfig ' +
+                    frameStruc,
+                    '-frame "EthernetII.srcMac',
+                    srcMac,
+                    'EthernetII.dstMac',
+                    dstMac,
+                    'Vlan.1.id',
+                    vlan_tag,
+                    'IPv4.1.sourceAddr',
+                    sourceAddr,
+                    'IPv4.1.destAddr',
+                    destAddr,
+                    '"')
     # config streamblock with part arguments
     # argument list use args dictionary
-    def stc_config_streamblock(self,streamblock_name,args_dict):
+
+    def stc_config_streamblock(self, streamblock_name, args_dict):
         cmd = ''
-        for key in args_dict.keys() :
-            temp_cmd = '-'+key+' '+str(args_dict[key])
+        for key in args_dict.keys():
+            temp_cmd = '-' + key + ' ' + str(args_dict[key])
             cmd = cmd + temp_cmd
-        return self.stc_config(streamblock_name,cmd)
+        return self.stc_config(streamblock_name, cmd)
     # get generator name from port name
     # return: generator name
-    def stc_get_generator(self,port_name):
-        cmd = port_name+' -children-generator'
+
+    def stc_get_generator(self, port_name):
+        cmd = port_name + ' -children-generator'
         return self.stc_get(cmd)
     # config generator with part arguments
     # argument list use args dictionary
     # return none
-    def stc_config_generator(self,generator_name,args_dict):
+
+    def stc_config_generator(self, generator_name, args_dict):
         cmd = ''
-        for key in args_dict.keys() :
-            temp_cmd = '-'+key+' '+str(args_dict[key])
+        for key in args_dict.keys():
+            temp_cmd = '-' + key + ' ' + str(args_dict[key])
             cmd = cmd + temp_cmd
-        return self.stc_config(generator_name,cmd)
+        return self.stc_config(generator_name, cmd)
     # attach port
     # return: port's parent project info
-    def stc_attach_ports(self,portList):
+
+    def stc_attach_ports(self, portList):
         cmd = 'AttachPorts -portList {'
-        for port in portList :
-            cmd = cmd+' '+port
-        cmd = cmd+'} -autoConnect TRUE'
+        for port in portList:
+            cmd = cmd + ' ' + port
+        cmd = cmd + '} -autoConnect TRUE'
         return self.stc_perform(cmd)
     # config src mac and dst mac
     # return: none
-    def stc_config_ethII(self,ethII,src_mac,dst_mac):
-        cmd = ethII+' -srcMac '+src_mac+' -dstMac '+dst_mac
+
+    def stc_config_ethII(self, ethII, src_mac, dst_mac):
+        cmd = ethII + ' -srcMac ' + src_mac + ' -dstMac ' + dst_mac
         return self.stc_config(cmd)
     # config src ip and dst ip
     # return: none
-    def stc_config_ethIII(self,ethIII,src_ip,dst_ip):
-        cmd = ethIII+' -sourceAddr '+src_ip+' -destAddr '+dst_ip
+
+    def stc_config_ethIII(self, ethIII, src_ip, dst_ip):
+        cmd = ethIII + ' -sourceAddr ' + src_ip + ' -destAddr ' + dst_ip
         return self.stc_config(cmd)
     # start streamblock
     # return: none
-    def stc_streamblock_start(self,streamblock_list):
+
+    def stc_streamblock_start(self, streamblock_list):
         cmd = 'StreamBlockStart -StreamBlockList {'
-        for streamblock in streamblock_list :
-            cmd = cmd+' '+streamblock
-        cmd = cmd+' } -ExecuteSynchronous TRUE'
+        for streamblock in streamblock_list:
+            cmd = cmd + ' ' + streamblock
+        cmd = cmd + ' } -ExecuteSynchronous TRUE'
         return self.stc_perform(cmd)
     # stop streamblock
-    def stc_streamblock_stop(self,streamblock_list):
+
+    def stc_streamblock_stop(self, streamblock_list):
         cmd = 'StreamBlockStop -StreamBlockList {'
         cmd = 'StreamBlockStop -StreamBlockList {'
-        for streamblock in streamblock_list :
-            cmd = cmd+' '+streamblock
-        cmd = cmd+' } -ExecuteSynchronous TRUE'
+        for streamblock in streamblock_list:
+            cmd = cmd + ' ' + streamblock
+        cmd = cmd + ' } -ExecuteSynchronous TRUE'
         return self.stc_perform(cmd)
     # start generator
     # return: none
-    def stc_generator_start(self,generator_List):
+
+    def stc_generator_start(self, generator_List):
         cmd = 'GeneratorStart -generatorList {'
-        for generator in generator_List :
-            cmd = cmd+' '+generator
-        cmd = cmd+' }'
+        for generator in generator_List:
+            cmd = cmd + ' ' + generator
+        cmd = cmd + ' }'
         return self.stc_perform(cmd)
     # stop generator
     # return: none
-    def stc_generator_stop(self,generator_List):
+
+    def stc_generator_stop(self, generator_List):
         cmd = 'GeneratorStop -generatorList {'
-        for generator in generator_List :
-            cmd = cmd+' '+generator
-        cmd = cmd+' }'
+        for generator in generator_List:
+            cmd = cmd + ' ' + generator
+        cmd = cmd + ' }'
         return self.stc_perform(cmd)
     # create rfc2544 throughput test
+
     def stc_setup_rfc2544_throughput(self):
         pass
     # create rfc2544 frameloss test
+
     def stc_setup_rfc2544_frameloss(self):
         pass
     # create rfc2544 latency test
+
     def stc_setup_rfc2544_latency(self):
         pass
     # start Sequence start
+
     def stc_sequence_start(self):
         return self.stc_perform('SequencerStart')
     # output rfc2544 throughput result
+
     def stc_get_rfc2544_throughput_result(self):
         pass
     # output rfc2544 frameloss result
+
     def stc_get_rfc2544_frameloss_result(self):
         pass
     # output rfc2544 latency result
+
     def stc_get_rfc2544_latency_result(self):
         pass
index 088a7b1..0936d39 100644 (file)
 import time
 from spirent import stcPython
 
+
 class Spirent_Tools(object):
     baseAPI = stcPython()
+
     def __init__(self):
         """This class provide API of Spirent
-        
+
         """
         super(Spirent_Tools, self).__init__()
-    
-    def send_packet(self,flow):
+
+    def send_packet(self, flow):
         try:
             #import pdb
-            #pdb.set_trace()
+            # pdb.set_trace()
             flow = eval(flow)
-            #stc init action
+            # stc init action
             self.baseAPI.stc_perform(' ResetConfig -config system1')
             self.baseAPI.stc_init()
-            #create project
+            # create project
             project = self.baseAPI.stc_create_project()
-            #create port
+            # create port
             port_handle = self.baseAPI.stc_create_port(project)
-            #config port
+            # config port
             slot = flow['send_port'].split('/')[0]
             port = flow['send_port'].split('/')[1]
-            self.baseAPI.stc_config_port_location(port_handle,flow['tester_ip'],slot,port)
-            #create streamblock
+            self.baseAPI.stc_config_port_location(
+                port_handle, flow['tester_ip'], slot, port)
+            # create streamblock
             streamblock_handle = self.baseAPI.stc_create_streamblock(
             streamblock_handle = self.baseAPI.stc_create_streamblock(
-                                                                 port_name  = port_handle,
-                                                                 ExpectedRxPort = '',
-                                                                 vlan_tag = flow['vlan'],
-                                                                 srcMac = flow['src_mac'],
-                                                                 dstMac = flow['dst_mac'],
-                                                                 sourceAddr = flow['src_ip'],
-                                                                 destAddr =flow['dst_ip']
-                                                                 )
+                port_name=port_handle,
+                ExpectedRxPort='',
+                vlan_tag=flow['vlan'],
+                srcMac=flow['src_mac'],
+                dstMac=flow['dst_mac'],
+                sourceAddr=flow['src_ip'],
+                destAddr=flow['dst_ip']
+            )
             # attach port
             port_list = [port_handle]
             self.baseAPI.stc_attach_ports(port_list)
-            #start streamblock
+            # start streamblock
             streamblock_list = [streamblock_handle]
             flag = self.baseAPI.stc_streamblock_start(streamblock_list)
             return str(streamblock_list).strip('[]')
-        except :
+        except:
             print("[ERROR]create stream block and send packet failed.")
             return False
 
-    def mac_learning(self,flowA,flowB):
+    def mac_learning(self, flowA, flowB):
         try:
             #import pdb
-            #pdb.set_trace()
+            # pdb.set_trace()
             flowA = eval(flowA)
             flowB = eval(flowB)
             port_list = []
             streamblock_list = []
-            #stc init action
+            # stc init action
             self.baseAPI.stc_perform(' ResetConfig -config system1')
             self.baseAPI.stc_init()
-            #create project
+            # create project
             project = self.baseAPI.stc_create_project()
-            #create port and config port 
-            for flow in [ flowA,flowB ]:
+            # create port and config port
+            for flow in [flowA, flowB]:
                 flow['port_handle'] = self.baseAPI.stc_create_port(project)
                 tmp_test_ip = flow['tester_ip']
                 tmp_slot = flow['send_port'].split('/')[0]
                 tmp_port = flow['send_port'].split('/')[1]
-                self.baseAPI.stc_config_port_location(flow['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
-                #create streamblock
-                flow['streamblock'] = self.baseAPI.stc_create_streamblock(port_name  = flow['port_handle'],
-                                                                     ExpectedRxPort = '',
-                                                                     vlan_tag = flow['vlan'],
-                                                                     srcMac = flow['src_mac'],
-                                                                     dstMac = flow['dst_mac'],
-                                                                     sourceAddr = flow['src_ip'],
-                                                                     destAddr =flow['dst_ip'])
-                #create port and stream block list
+                self.baseAPI.stc_config_port_location(
+                    flow['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+                # create streamblock
+                flow['streamblock'] = self.baseAPI.stc_create_streamblock(
+                    port_name=flow['port_handle'],
+                    ExpectedRxPort='',
+                    vlan_tag=flow['vlan'],
+                    srcMac=flow['src_mac'],
+                    dstMac=flow['dst_mac'],
+                    sourceAddr=flow['src_ip'],
+                    destAddr=flow['dst_ip'])
+                # create port and stream block list
                 port_list.append(flow['port_handle'])
                 streamblock_list.append(flow['streamblock'])
 
-            #attach port
+            # attach port
             self.baseAPI.stc_attach_ports(port_list)
-            #start streamblock
+            # start streamblock
             flag = self.baseAPI.stc_streamblock_start(streamblock_list)
             # mac learning
             time.sleep(2)
             # stop stream block
             self.baseAPI.stc_streamblock_stop(streamblock_list)
             # delete streamblock and release port
-            for flow in [ flowA,flowB ]:
+            for flow in [flowA, flowB]:
                 tmp_test_ip = flow['tester_ip']
                 tmp_slot = flow['send_port'].split('/')[0]
                 tmp_port = flow['send_port'].split('/')[1]
                 self.baseAPI.stc_delete(flow['streamblock'])
-                self.baseAPI.stc_release('%s/%s/%s' %(tmp_test_ip,tmp_slot,tmp_port))
+                self.baseAPI.stc_release(
+                    '%s/%s/%s' %
+                    (tmp_test_ip, tmp_slot, tmp_port))
             # delete project
             self.baseAPI.stc_delete('project1')
             ret = self.baseAPI.stc_perform('ResetConfig -config system1')
             return True
-        except :
+        except:
             print("[ERROR]mac learning failed")
             return False
 
-    def stop_flow(self,streamblock_list,flow):
+    def stop_flow(self, streamblock_list, flow):
         flow = eval(flow)
         streamblock_list = streamblock_list.strip('\'').split(',')
-        #stop streamblock list
-        try :
+        # stop streamblock list
+        try:
             ret = self.baseAPI.stc_streamblock_stop(streamblock_list)
-        except :
+        except:
             print("[ERROR]Stop the streamblock list failed.")
             return False
-        #delete streamblock
-        try :
-            for streamblock in streamblock_list :
+        # delete streamblock
+        try:
+            for streamblock in streamblock_list:
                 ret = self.baseAPI.stc_delete(streamblock)
-        except :
+        except:
             print("[ERROR]delete stream block.")
             return False
-        #release port
-        try :
+        # release port
+        try:
             slot = flow['send_port'].split('/')[0]
             port = flow['send_port'].split('/')[1]
-            ret = self.baseAPI.stc_release('%s/%s/%s' %(flow['tester_ip'],slot,port))
-        except :
+            ret = self.baseAPI.stc_release(
+                '%s/%s/%s' %
+                (flow['tester_ip'], slot, port))
+        except:
             print("[ERROR]Release port failed")
             return False
-        ##delete project
-        try :
+        # delete project
+        try:
             ret = self.baseAPI.stc_delete('project1')
             ret = self.baseAPI.stc_perform('ResetConfig -config system1')
             return True
-        except :
+        except:
             print("[ERROR]Delete project1 failed.")
             return False
-        
-    def run_rfc2544_throughput(self,forward_init_flows,reverse_init_flows):
+
+    def run_rfc2544_throughput(self, forward_init_flows, reverse_init_flows):
         #import pdb
         #import pdb
-        #pdb.set_trace()
-        #rebuild the flows 
+        # pdb.set_trace()
+        # rebuild the flows
         forward_init_flows = eval(forward_init_flows)
         reverse_init_flows = eval(reverse_init_flows)
-        #stc init action
+        # stc init action
         self.baseAPI.stc_perform(' ResetConfig -config system1')
         self.baseAPI.stc_init()
-        #create project 
+        # create project
         project = self.baseAPI.stc_create_project()
-        #create sequencer
-        seq_handle = self.baseAPI.stc_create('Sequencer -under %s' %(project))
-        #create port handle
+        # create sequencer
+        seq_handle = self.baseAPI.stc_create('Sequencer -under %s' % (project))
+        # create port handle
         forward_port_handle = self.baseAPI.stc_create_port(project)
         reverse_port_handle = self.baseAPI.stc_create_port(project)
-        #create forward flow streamblock
+        # create forward flow streamblock
         for key in forward_init_flows.keys():
             forward_init_flows[key]['port_handle'] = forward_port_handle
             tmp_test_ip = forward_init_flows[key]['tester_ip']
-            tmp_slot    = forward_init_flows[key]['send_port'].split('/')[0]
-            tmp_port    = forward_init_flows[key]['send_port'].split('/')[1]
-            self.baseAPI.stc_config_port_location(forward_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
-            #create streamblock
-            forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name  = forward_init_flows[key]['port_handle'],
-                                                                                     vlan_tag   = forward_init_flows[key]['vlan'],
-                                                                                     ExpectedRxPort = reverse_port_handle,
-                                                                                     srcMac     = forward_init_flows[key]['src_mac'],
-                                                                                     dstMac     = forward_init_flows[key]['dst_mac'],
-                                                                                     sourceAddr = forward_init_flows[key]['src_ip'],
-                                                                                     destAddr   = forward_init_flows[key]['dst_ip'])
-        #create reverse flow streamblock
+            tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
+            tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
+            self.baseAPI.stc_config_port_location(
+                forward_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+            # create streamblock
+            forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+                port_name=forward_init_flows[key]['port_handle'],
+                vlan_tag=forward_init_flows[key]['vlan'],
+                ExpectedRxPort=reverse_port_handle,
+                srcMac=forward_init_flows[key]['src_mac'],
+                dstMac=forward_init_flows[key]['dst_mac'],
+                sourceAddr=forward_init_flows[key]['src_ip'],
+                destAddr=forward_init_flows[key]['dst_ip'])
+        # create reverse flow streamblock
         for key in reverse_init_flows.keys():
             reverse_init_flows[key]['port_handle'] = reverse_port_handle
             tmp_test_ip = reverse_init_flows[key]['tester_ip']
-            tmp_slot    = reverse_init_flows[key]['send_port'].split('/')[0]
-            tmp_port    = reverse_init_flows[key]['send_port'].split('/')[1]
-            self.baseAPI.stc_config_port_location(reverse_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
-            #create streamblock
-            reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name  = reverse_init_flows[key]['port_handle'],
-                                                                                     vlan_tag   = reverse_init_flows[key]['vlan'],
-                                                                                     ExpectedRxPort = forward_port_handle,
-                                                                                     srcMac     = reverse_init_flows[key]['src_mac'],
-                                                                                     dstMac     = reverse_init_flows[key]['dst_mac'],
-                                                                                     sourceAddr = reverse_init_flows[key]['src_ip'],
-                                                                                     destAddr   = reverse_init_flows[key]['dst_ip'])
-        #Create the RFC 2544 throughput test
-        throughput_config = self.baseAPI.stc_create('Rfc2544ThroughputConfig -under ',project,
-                                                '-AcceptableFrameLoss 0.01',
-                                                '-NumOfTrials 1',
-                                                '-DurationSeconds 60',
-                                                '-SearchMode BINARY',
-                                                '-RateLowerLimit 1',
-                                                '-RateUpperLimit 100',
-                                                '-RateInitial 10',
-                                                '-UseExistingStreamBlocks True',
-                                                '-EnableLearning False',
-                                                '-FrameSizeIterationMode CUSTOM',
-                                                '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
-                                                '-LatencyType LIFO',
-                                                '-EnableJitterMeasurement TRUE'
-                                                )
+            tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
+            tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
+            self.baseAPI.stc_config_port_location(
+                reverse_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+            # create streamblock
+            reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+                port_name=reverse_init_flows[key]['port_handle'],
+                vlan_tag=reverse_init_flows[key]['vlan'],
+                ExpectedRxPort=forward_port_handle,
+                srcMac=reverse_init_flows[key]['src_mac'],
+                dstMac=reverse_init_flows[key]['dst_mac'],
+                sourceAddr=reverse_init_flows[key]['src_ip'],
+                destAddr=reverse_init_flows[key]['dst_ip'])
+        # Create the RFC 2544 throughput test
+        throughput_config = self.baseAPI.stc_create(
+            'Rfc2544ThroughputConfig -under ',
+            project,
+            '-AcceptableFrameLoss 0.01',
+            '-NumOfTrials 1',
+            '-DurationSeconds 60',
+            '-SearchMode BINARY',
+            '-RateLowerLimit 1',
+            '-RateUpperLimit 100',
+            '-RateInitial 10',
+            '-UseExistingStreamBlocks True',
+            '-EnableLearning False',
+            '-FrameSizeIterationMode CUSTOM',
+            '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
+            '-LatencyType LIFO',
+            '-EnableJitterMeasurement TRUE')
         #import pdb
-        #pdb.set_trace()
+        # pdb.set_trace()
         # list streamblocks
         streamblock_list = '" '
         for key in forward_init_flows.keys():
-            streamblock_list = streamblock_list+forward_init_flows[key]['streamblock']+' '
+            streamblock_list = streamblock_list + \
+                forward_init_flows[key]['streamblock'] + ' '
         for key in reverse_init_flows.keys():
-            streamblock_list = streamblock_list+reverse_init_flows[key]['streamblock']+' '
-        streamblock_list = streamblock_list+'"'
+            streamblock_list = streamblock_list + \
+                reverse_init_flows[key]['streamblock'] + ' '
+        streamblock_list = streamblock_list + '"'
 
-        throughput_sbProfile= self.baseAPI.stc_create('Rfc2544StreamBlockProfile -under '+throughput_config+' -Active TRUE -LocalActive TRUE')
-        self.baseAPI.stc_config(throughput_sbProfile,'-StreamBlockList '+streamblock_list)
-        self.baseAPI.stc_perform('ExpandBenchmarkConfigCommand','-config ',throughput_config)
+        throughput_sbProfile = self.baseAPI.stc_create(
+            'Rfc2544StreamBlockProfile -under ' +
+            throughput_config +
+            ' -Active TRUE -LocalActive TRUE')
+        self.baseAPI.stc_config(
+            throughput_sbProfile,
+            '-StreamBlockList ' +
+            streamblock_list)
+        self.baseAPI.stc_perform(
+            'ExpandBenchmarkConfigCommand',
+            '-config ',
+            throughput_config)
 
-        #attach the port before testing
-        port_list = [ forward_port_handle,reverse_port_handle]
+        # attach the port before testing
+        port_list = [forward_port_handle, reverse_port_handle]
         self.baseAPI.stc_attach_ports(port_list)
 
-        #stc apply and begin to sequence test
+        # stc apply and begin to sequence test
         self.baseAPI.stc_apply()
         self.baseAPI.stc_perform("SequencerStart")
 
-        #wait until complete
+        # wait until complete
         self.baseAPI.stc_waituntilcomplete()
-        
-        #get result db
-        resultsdb = self.baseAPI.stc_get("system1.project.TestResultSetting", "-CurrentResultFileName")
-        results_dict = self.baseAPI.stc_perform('QueryResult','-DatabaseConnectionString',resultsdb,'-ResultPath RFC2544ThroughputTestResultDetailedSummaryView')
-        #print results_dict
-        return True,results_dict
 
-    def run_rfc2544_frameloss(self,forward_init_flows,reverse_init_flows):
+        # get result db
+        resultsdb = self.baseAPI.stc_get(
+            "system1.project.TestResultSetting",
+            "-CurrentResultFileName")
+        results_dict = self.baseAPI.stc_perform(
+            'QueryResult',
+            '-DatabaseConnectionString',
+            resultsdb,
+            '-ResultPath RFC2544ThroughputTestResultDetailedSummaryView')
+        # print results_dict
+        return True, results_dict
+
+    def run_rfc2544_frameloss(self, forward_init_flows, reverse_init_flows):
         #import pdb
-        #pdb.set_trace()
-        #rebuild the flows
+        # pdb.set_trace()
+        # rebuild the flows
         forward_init_flows = eval(forward_init_flows)
         reverse_init_flows = eval(reverse_init_flows)
-        #stc init action
+        # stc init action
         self.baseAPI.stc_perform(' ResetConfig -config system1')
         self.baseAPI.stc_init()
-        #create project
+        # create project
         project = self.baseAPI.stc_create_project()
-        #create sequencer
-        seq_handle = self.baseAPI.stc_create('Sequencer -under %s' %(project))
-        #create port handle
+        # create sequencer
+        seq_handle = self.baseAPI.stc_create('Sequencer -under %s' % (project))
+        # create port handle
         forward_port_handle = self.baseAPI.stc_create_port(project)
         reverse_port_handle = self.baseAPI.stc_create_port(project)
-        #create forward flow streamblock
+        # create forward flow streamblock
         for key in forward_init_flows.keys():
             forward_init_flows[key]['port_handle'] = forward_port_handle
             tmp_test_ip = forward_init_flows[key]['tester_ip']
-            tmp_slot    = forward_init_flows[key]['send_port'].split('/')[0]
-            tmp_port    = forward_init_flows[key]['send_port'].split('/')[1]
-            self.baseAPI.stc_config_port_location(forward_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
-            #create streamblock
-            forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name  = forward_init_flows[key]['port_handle'],
-                                                                                     vlan_tag   = forward_init_flows[key]['vlan'],
-                                                                                     ExpectedRxPort = reverse_port_handle,
-                                                                                     srcMac     = forward_init_flows[key]['src_mac'],
-                                                                                     dstMac     = forward_init_flows[key]['dst_mac'],
-                                                                                     sourceAddr = forward_init_flows[key]['src_ip'],
-                                                                                     destAddr   = forward_init_flows[key]['dst_ip'])
-        #create reverse flow streamblock
+            tmp_slot = forward_init_flows[key]['send_port'].split('/')[0]
+            tmp_port = forward_init_flows[key]['send_port'].split('/')[1]
+            self.baseAPI.stc_config_port_location(
+                forward_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+            # create streamblock
+            forward_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+                port_name=forward_init_flows[key]['port_handle'],
+                vlan_tag=forward_init_flows[key]['vlan'],
+                ExpectedRxPort=reverse_port_handle,
+                srcMac=forward_init_flows[key]['src_mac'],
+                dstMac=forward_init_flows[key]['dst_mac'],
+                sourceAddr=forward_init_flows[key]['src_ip'],
+                destAddr=forward_init_flows[key]['dst_ip'])
+        # create reverse flow streamblock
         for key in reverse_init_flows.keys():
             reverse_init_flows[key]['port_handle'] = reverse_port_handle
             tmp_test_ip = reverse_init_flows[key]['tester_ip']
-            tmp_slot    = reverse_init_flows[key]['send_port'].split('/')[0]
-            tmp_port    = reverse_init_flows[key]['send_port'].split('/')[1]
-            self.baseAPI.stc_config_port_location(reverse_init_flows[key]['port_handle'],tmp_test_ip,tmp_slot,tmp_port)
-            #create streamblock
-            reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(port_name  = reverse_init_flows[key]['port_handle'],
-                                                                                     vlan_tag   = reverse_init_flows[key]['vlan'],
-                                                                                     ExpectedRxPort = forward_port_handle,
-                                                                                     srcMac     = reverse_init_flows[key]['src_mac'],
-                                                                                     dstMac     = reverse_init_flows[key]['dst_mac'],
-                                                                                     sourceAddr = reverse_init_flows[key]['src_ip'],
-                                                                                     destAddr   = reverse_init_flows[key]['dst_ip'])
-        #Create the RFC 2544 frameloss test
-        frameloss_config = self.baseAPI.stc_create('Rfc2544FrameLossConfig -under ',project,
-                                                '-NumOfTrials 1 ',
-                                                '-DurationSeconds 60 ',
-                                                '-LoadUnits PERCENT_LINE_RATE ',
-                                                '-LoadType CUSTOM '
-                                                '-CustomLoadList 100 '
-                                                '-UseExistingStreamBlocks True ',
-                                                '-EnableLearning False ',
-                                                '-FrameSizeIterationMode CUSTOM ',
-                                                '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
-                                                '-LatencyType LIFO',
-                                                '-EnableJitterMeasurement TRUE'
-                                                )
+            tmp_slot = reverse_init_flows[key]['send_port'].split('/')[0]
+            tmp_port = reverse_init_flows[key]['send_port'].split('/')[1]
+            self.baseAPI.stc_config_port_location(
+                reverse_init_flows[key]['port_handle'], tmp_test_ip, tmp_slot, tmp_port)
+            # create streamblock
+            reverse_init_flows[key]['streamblock'] = self.baseAPI.stc_create_streamblock(
+                port_name=reverse_init_flows[key]['port_handle'],
+                vlan_tag=reverse_init_flows[key]['vlan'],
+                ExpectedRxPort=forward_port_handle,
+                srcMac=reverse_init_flows[key]['src_mac'],
+                dstMac=reverse_init_flows[key]['dst_mac'],
+                sourceAddr=reverse_init_flows[key]['src_ip'],
+                destAddr=reverse_init_flows[key]['dst_ip'])
+        # Create the RFC 2544 frameloss test
+        frameloss_config = self.baseAPI.stc_create(
+            'Rfc2544FrameLossConfig -under ',
+            project,
+            '-NumOfTrials 1 ',
+            '-DurationSeconds 60 ',
+            '-LoadUnits PERCENT_LINE_RATE ',
+            '-LoadType CUSTOM '
+            '-CustomLoadList 100 '
+            '-UseExistingStreamBlocks True ',
+            '-EnableLearning False ',
+            '-FrameSizeIterationMode CUSTOM ',
+            '-CustomFrameSizeList "70 128 256 512 1024 1280 1518"',
+            '-LatencyType LIFO',
+            '-EnableJitterMeasurement TRUE')
         #import pdb
-        #pdb.set_trace()
+        # pdb.set_trace()
         # list streamblocks
         streamblock_list = '" '
         for key in forward_init_flows.keys():
-            streamblock_list = streamblock_list+forward_init_flows[key]['streamblock']+' '
+            streamblock_list = streamblock_list + \
+                forward_init_flows[key]['streamblock'] + ' '
         for key in reverse_init_flows.keys():
-            streamblock_list = streamblock_list+reverse_init_flows[key]['streamblock']+' '
-        streamblock_list = streamblock_list+'"'
+            streamblock_list = streamblock_list + \
+                reverse_init_flows[key]['streamblock'] + ' '
+        streamblock_list = streamblock_list + '"'
 
-        frameloss_sbProfile= self.baseAPI.stc_create('Rfc2544StreamBlockProfile -under '+frameloss_config+' -Active TRUE -LocalActive TRUE')
-        self.baseAPI.stc_config(frameloss_sbProfile,'-StreamBlockList '+streamblock_list)
-        self.baseAPI.stc_perform('ExpandBenchmarkConfigCommand','-config ',frameloss_config)
+        frameloss_sbProfile = self.baseAPI.stc_create(
+            'Rfc2544StreamBlockProfile -under ' +
+            frameloss_config +
+            ' -Active TRUE -LocalActive TRUE')
+        self.baseAPI.stc_config(
+            frameloss_sbProfile,
+            '-StreamBlockList ' +
+            streamblock_list)
+        self.baseAPI.stc_perform(
+            'ExpandBenchmarkConfigCommand',
+            '-config ',
+            frameloss_config)
 
-        #attach the port before testing
-        port_list = [ forward_port_handle,reverse_port_handle]
+        # attach the port before testing
+        port_list = [forward_port_handle, reverse_port_handle]
         self.baseAPI.stc_attach_ports(port_list)
 
-        #stc apply and begin to sequence test
+        # stc apply and begin to sequence test
         self.baseAPI.stc_apply()
         self.baseAPI.stc_perform("SequencerStart")
 
-        #wait until complete
+        # wait until complete
         self.baseAPI.stc_waituntilcomplete()
 
-        #get result db
-        resultsdb = self.baseAPI.stc_get("system1.project.TestResultSetting", "-CurrentResultFileName")
-        results_dict = self.baseAPI.stc_perform('QueryResult','-DatabaseConnectionString',resultsdb,'-ResultPath RFC2544FrameLossTestResultDetailedSummaryView')
+        # get result db
+        resultsdb = self.baseAPI.stc_get(
+            "system1.project.TestResultSetting",
+            "-CurrentResultFileName")
+        results_dict = self.baseAPI.stc_perform(
+            'QueryResult',
+            '-DatabaseConnectionString',
+            resultsdb,
+            '-ResultPath RFC2544FrameLossTestResultDetailedSummaryView')
         #import pdb
-        #pdb.set_trace()
-        return True,results_dict
+        # pdb.set_trace()
+        return True, results_dict
 
-    def run_rfc2544_latency(self,forward_init_flows,reverse_init_flows):
+    def run_rfc2544_latency(self, forward_init_flows, reverse_init_flows):
         pass
-
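
Both RFC 2544 entry points above eval() their arguments, so callers pass stringified dictionaries keyed by flow name, each entry carrying the tester_ip, send_port (slot/port), vlan, src_mac, dst_mac, src_ip and dst_ip fields read in the loops above; port_handle and streamblock are filled in internally. A minimal sketch of such a call, with purely illustrative addresses and an assumed spirent instance of the tools class:

    forward_flows = {
        'f1': {'tester_ip': '192.168.1.100',   # illustrative chassis address
               'send_port': '1/1',             # slot/port, split on '/'
               'vlan': '100',
               'src_mac': '00:10:94:00:00:01', 'dst_mac': '00:10:94:00:00:02',
               'src_ip': '10.0.0.1', 'dst_ip': '10.0.0.2'},
    }
    reverse_flows = {
        'r1': {'tester_ip': '192.168.1.100',
               'send_port': '1/2',
               'vlan': '100',
               'src_mac': '00:10:94:00:00:02', 'dst_mac': '00:10:94:00:00:01',
               'src_ip': '10.0.0.2', 'dst_ip': '10.0.0.1'},
    }
    # the arguments are str()-ed because the methods eval() them back into dicts
    ok, results = spirent.run_rfc2544_throughput(str(forward_flows), str(reverse_flows))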
index 8951f96..46583df 100644 (file)
@@ -12,5 +12,6 @@ from vstf.agent.spirent.tools import Spirent_Tools as Spirent
 
 
 class agentSpirent(Spirent):
+
     def __init__(self):
         super(agentSpirent, self).__init__()
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 818ae76..306d912 100644 (file)
@@ -36,7 +36,7 @@ def dict2text(info):
 
 
 def text2dict(candy):
-    tmp = candy.replace("##","#").split("#")
+    tmp = candy.replace("##", "#").split("#")
     result = {
         "sn": int(tmp[0]),
         "node": tmp[1],
@@ -49,7 +49,7 @@ def text2dict(candy):
 
 
 def text2tuple(candy):
-    tmp = candy.replace("##","#").split("#")
+    tmp = candy.replace("##", "#").split("#")
 
     sn = int(tmp[0])
     node = tmp[1]
index 9de5a2c..802cb21 100644 (file)
@@ -12,6 +12,7 @@ from oslo.config import cfg
 
 
 class CfgParser(object):
+
     def __init__(self, config_file):
         super(CfgParser, self).__init__()
         if os.path.isfile(config_file) is False:
index 541bba7..8997cd6 100644 (file)
@@ -7,6 +7,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+
 def arg(*args, **kwargs):
     """Decorator for CLI args.
 
@@ -33,4 +34,4 @@ def add_arg(func, *args, **kwargs):
     if (args, kwargs) not in func.arguments:
         # Because of the semantics of decorator composition if we just append
         # to the options list positional options will appear to be backwards.
-        func.arguments.insert(0, (args, kwargs))
\ No newline at end of file
+        func.arguments.insert(0, (args, kwargs))
index 2952be2..f348a80 100644 (file)
@@ -20,7 +20,9 @@ def execute(cmd=None, care_result=True):
     try:
         (status, ret) = commands.getstatusoutput(cmd)
         if care_result and 0 != status:
-            LOG.error('CMD<%(cmd)s> \nSTDOUT:\n%(ret)s.', {'cmd':cmd, 'ret':ret})
+            LOG.error(
+                'CMD<%(cmd)s> \nSTDOUT:\n%(ret)s.', {
+                    'cmd': cmd, 'ret': ret})
             return None
         else:
             return ret
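
execute() above wraps commands.getstatusoutput(): when care_result is true and the command exits non-zero it logs the reformatted CMD/STDOUT message and returns None, otherwise it returns the captured output. A small usage sketch, assuming the module is importable as vstf.common.cmd (Python 2, since the commands module is used):

    from vstf.common import cmd

    kernel = cmd.execute("uname -r")     # command output on success, None on failure
    if kernel is None:
        pass                             # the failure was already logged by execute()
    tail = cmd.execute("dmesg | tail -5", care_result=False)  # output returned even on failure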
index 35933da..4608749 100644 (file)
@@ -7,7 +7,10 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import sys, os, time, atexit
+import sys
+import os
+import time
+import atexit
 import logging
 from signal import SIGTERM
 
@@ -17,11 +20,16 @@ LOG = logging.getLogger(__name__)
 class Daemon(object):
     """
     A generic daemon class.
-    
+
     Usage: subclass the Daemon class and override the run() method
     """
 
-    def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
+    def __init__(
+            self,
+            pidfile,
+            stdin='/dev/null',
+            stdout='/dev/null',
+            stderr='/dev/null'):
         super(Daemon, self).__init__()
         self.stdin = stdin
         self.stdout = stdout
@@ -30,7 +38,7 @@ class Daemon(object):
 
     def daemonize(self):
         """
-        do the UNIX double-fork magic, see Stevens' "Advanced 
+        do the UNIX double-fork magic, see Stevens' "Advanced
         Programming in the UNIX Environment" for details (ISBN 0201563177)
         http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
         """
@@ -38,7 +46,7 @@ class Daemon(object):
             pid = os.fork()
             if pid > 0:
                 sys.exit(0)
-        except OSError, e:
+        except OSError as e:
             LOG.error("fork #1 failed: %(errno)s, %(strerror)s",
                       {'errno': e.errno, 'strerror': e.strerror})
             sys.exit(1)
@@ -54,7 +62,7 @@ class Daemon(object):
             if pid > 0:
                 # exit from second parent
                 sys.exit(0)
-        except OSError, e:
+        except OSError as e:
             LOG.error("fork #1 failed: %(errno)s, %(strerror)s",
                       {'errno': e.errno, 'strerror': e.strerror})
             sys.exit(1)
@@ -116,12 +124,12 @@ class Daemon(object):
             sys.stderr.write(message % self.pidfile)
             return  # not an error in a restart
 
-        # Try killing the daemon process    
+        # Try killing the daemon process
         try:
-            while 1:
+            while True:
                 os.kill(pid, SIGTERM)
                 time.sleep(0.1)
-        except OSError, err:
+        except OSError as err:
             err = str(err)
             if err.find("No such process") > 0:
                 if os.path.exists(self.pidfile):
@@ -139,16 +147,16 @@ class Daemon(object):
 
     def run(self):
         """
-        You should override this method when you subclass Daemon. 
+        You should override this method when you subclass Daemon.
         It will be called after the process has been
         daemonized by start() or restart().
-        
+
         """
         pass
 
     def daemon_die(self):
         """You should override this method when you shutdown daemon
         this func will be call by stop() before kill the process
-        
+
         """
         pass
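
The Daemon docstring kept above spells out the intended use: subclass it and override run(), which executes after start() or restart() has daemonized the process, while daemon_die() is the cleanup hook invoked by stop() before the process is killed. A minimal sketch, assuming the class is importable as vstf.common.daemon.Daemon and using a made-up pidfile path:

    import time
    from vstf.common.daemon import Daemon   # assumed import path

    class HeartbeatDaemon(Daemon):
        def run(self):
            # runs in the daemonized child until stop() delivers SIGTERM
            while True:
                time.sleep(10)

        def daemon_die(self):
            pass                             # cleanup before the process is killed

    if __name__ == '__main__':
        HeartbeatDaemon('/var/run/heartbeat.pid').start()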
index 98d539f..ed91055 100644 (file)
@@ -17,7 +17,9 @@ def check(key, choices=[], defaults=_DEFAULTS):
                 if defaults != _DEFAULTS:
                     kwargs[key] = defaults
                 else:
-                    raise Exception("Error: '%s' is needed in %s" % (key, func))
+                    raise Exception(
+                        "Error: '%s' is needed in %s" %
+                        (key, func))
 
             if choices and kwargs[key] not in choices:
                 raise Exception("Error: %s :%s" % (key, kwargs[key]))
@@ -40,7 +42,9 @@ def dcheck(key, choices=[]):
                 values = None
             if isinstance(values, dict):
                 if key not in values:
-                    raise Exception("Error: '%s' is needed in %s" % (key, func))
+                    raise Exception(
+                        "Error: '%s' is needed in %s" %
+                        (key, func))
                 if choices and values[key] not in choices:
                     raise Exception("Error: %s :%s" % (key, values[key]))
             ret = func(*args)
@@ -84,7 +88,8 @@ def namespace():
             ret = func(*args, **kwargs)
             nspace = kwargs.get("namespace", None)
             if nspace:
-                ret = "ip netns exec %(namespace)s " % {"namespace": nspace} + ret
+                ret = "ip netns exec %(namespace)s " % {
+                    "namespace": nspace} + ret
             return ret
 
         return __deco
index 415b003..b8b6488 100644 (file)
@@ -34,7 +34,10 @@ def _init_log(log_file, level=logging.INFO, clevel=logging.INFO):
     return file_handler, console
 
 
-def setup_logging(level=logging.INFO, log_file="/var/log/esp_test.log", clevel=logging.WARNING):
+def setup_logging(
+        level=logging.INFO,
+        log_file="/var/log/esp_test.log",
+        clevel=logging.WARNING):
     log = logging.getLogger()
     log.setLevel(level)
     file_handler, console = _init_log(log_file, level, clevel)
index f3adee8..b216229 100644 (file)
@@ -14,22 +14,124 @@ doc_type = '<!DOCTYPE HTML>\n'
 default_title = "Html Page"
 charset = '<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />\n'
 
-html4_tags = {'a', 'abbr', 'acronym', 'address', 'area', 'b', 'base', 'bdo', 'big',
-              'blockquote', 'body', 'br', 'button', 'caption', 'cite', 'code', 'col',
-              'colgroup', 'dd', 'del', 'div', 'dfn', 'dl', 'dt', 'em', 'fieldset',
-              'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head',
-              'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd',
-              'label', 'legend', 'li', 'link', 'map', 'menu', 'menuitem', 'meta',
-              'noframes', 'noscript', 'object', 'ol', 'optgroup', 'option', 'p',
-              'param', 'pre', 'q', 'samp', 'script', 'select', 'small', 'span', 'strong',
-              'style', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
-              'thead', 'title', 'tr', 'tt', 'ul', 'var'}
+html4_tags = {
+    'a',
+    'abbr',
+    'acronym',
+    'address',
+    'area',
+    'b',
+    'base',
+    'bdo',
+    'big',
+    'blockquote',
+    'body',
+    'br',
+    'button',
+    'caption',
+    'cite',
+    'code',
+    'col',
+    'colgroup',
+    'dd',
+    'del',
+    'div',
+    'dfn',
+    'dl',
+    'dt',
+    'em',
+    'fieldset',
+    'form',
+    'frame',
+    'frameset',
+    'h1',
+    'h2',
+    'h3',
+    'h4',
+    'h5',
+    'h6',
+    'head',
+    'hr',
+    'html',
+    'i',
+    'iframe',
+    'img',
+    'input',
+    'ins',
+    'kbd',
+    'label',
+    'legend',
+    'li',
+    'link',
+    'map',
+    'menu',
+    'menuitem',
+    'meta',
+    'noframes',
+    'noscript',
+    'object',
+    'ol',
+    'optgroup',
+    'option',
+    'p',
+    'param',
+    'pre',
+    'q',
+    'samp',
+    'script',
+    'select',
+    'small',
+    'span',
+    'strong',
+    'style',
+    'sub',
+    'sup',
+    'table',
+    'tbody',
+    'td',
+    'textarea',
+    'tfoot',
+    'th',
+    'thead',
+    'title',
+    'tr',
+    'tt',
+    'ul',
+    'var'}
 disused_tags = {'isindex', 'font', 'dir', 's', 'strike',
                 'u', 'center', 'basefont', 'applet', 'xmp'}
-html5_tags = {'article', 'aside', 'audio', 'bdi', 'canvas', 'command', 'datalist', 'details',
-              'dialog', 'embed', 'figcaption', 'figure', 'footer', 'header',
-              'keygen', 'mark', 'meter', 'nav', 'output', 'progress', 'rp', 'rt', 'ruby',
-              'section', 'source', 'summary', 'details', 'time', 'track', 'video', 'wbr'}
+html5_tags = {
+    'article',
+    'aside',
+    'audio',
+    'bdi',
+    'canvas',
+    'command',
+    'datalist',
+    'details',
+    'dialog',
+    'embed',
+    'figcaption',
+    'figure',
+    'footer',
+    'header',
+    'keygen',
+    'mark',
+    'meter',
+    'nav',
+    'output',
+    'progress',
+    'rp',
+    'rt',
+    'ruby',
+    'section',
+    'source',
+    'summary',
+    'details',
+    'time',
+    'track',
+    'video',
+    'wbr'}
 
 nl = '\n'
 tags = html4_tags | disused_tags | html5_tags
@@ -105,7 +207,8 @@ class Tag(list):
         result = ''
         if self.tag_name:
             result += '<%s%s%s>' % (self.tag_name,
-                                    self._render_attr(), self._self_close() * ' /')
+                                    self._render_attr(),
+                                    self._self_close() * ' /')
         if not self._self_close():
             isnl = True
             for c in self:
index 2209dfd..0333136 100644 (file)
 
 # from __future__ import nested_scopes
 
-import os, os.path, shutil, glob, re, sys, getopt, stat, string
+import os
+import os.path
+import shutil
+import glob
+import re
+import sys
+import getopt
+import stat
+import string
 
 try:
     import win32file
@@ -19,6 +27,7 @@ except:
 
 
 class Cookie:
+
     def __init__(self):
         self.sink_root = ""
         self.target_root = ""
@@ -45,7 +54,7 @@ class Cookie:
 def visit(cookie, dirname, names):
     """Copy files names from sink_root + (dirname - sink_root) to target_root + (dirname - sink_root)"""
     if os.path.split(cookie.sink_root)[
-        1]:  # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
+            1]:  # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
         dirname = dirname[len(cookie.sink_root) + 1:]
     else:
         dirname = dirname[len(cookie.sink_root):]
@@ -81,7 +90,9 @@ def visit(cookie, dirname, names):
                             elif os.path.isdir(sink):
                                 removeDir(cookie, sink)
                             else:
-                                logError("Sink %s is neither a file nor a folder (skip removal)" % sink)
+                                logError(
+                                    "Sink %s is neither a file nor a folder (skip removal)" %
+                                    sink)
                         names_excluded += [names[name_index]]
                         del (names[name_index])
                         name_index = name_index - 1
@@ -95,7 +106,7 @@ def visit(cookie, dirname, names):
         for name in os.listdir(target_dir):
             if not cookie.delete_excluded and name in names_excluded:
                 continue
-            if not name in names:
+            if name not in names:
                 target = os.path.join(target_dir, name)
                 if os.path.isfile(target):
                     removeFile(cookie, target)
@@ -122,7 +133,9 @@ def visit(cookie, dirname, names):
                     copyFile(cookie, sink, target)
                 else:
                     # file-???
-                    logError("Target %s is neither a file nor folder (skip update)" % sink)
+                    logError(
+                        "Target %s is neither a file nor folder (skip update)" %
+                        sink)
 
             elif os.path.isdir(sink):
                 if os.path.isfile(target):
@@ -131,7 +144,9 @@ def visit(cookie, dirname, names):
                     makeDir(cookie, target)
             else:
                 # ???-xxx
-                logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+                logError(
+                    "Sink %s is neither a file nor a folder (skip update)" %
+                    sink)
 
         elif not cookie.existing:
             # When target dont exist:
@@ -142,7 +157,9 @@ def visit(cookie, dirname, names):
                 # folder
                 makeDir(cookie, target)
             else:
-                logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+                logError(
+                    "Sink %s is neither a file nor a folder (skip update)" %
+                    sink)
 
 
 def log(cookie, message):
@@ -166,7 +183,9 @@ def shouldUpdate(cookie, sink, target):
         sink_sz = sink_st.st_size
         sink_mt = sink_st.st_mtime
     except:
-        logError("Fail to retrieve information about sink %s (skip update)" % sink)
+        logError(
+            "Fail to retrieve information about sink %s (skip update)" %
+            sink)
         return 0
 
     try:
@@ -174,7 +193,9 @@ def shouldUpdate(cookie, sink, target):
         target_sz = target_st.st_size
         target_mt = target_st.st_mtime
     except:
-        logError("Fail to retrieve information about target %s (skip update)" % target)
+        logError(
+            "Fail to retrieve information about target %s (skip update)" %
+            target)
         return 0
 
     if cookie.update:
@@ -203,7 +224,7 @@ def copyFile(cookie, sink, target):
         if cookie.time:
             try:
                 s = os.stat(sink)
-                os.utime(target, (s.st_atime, s.st_mtime));
+                os.utime(target, (s.st_atime, s.st_mtime))
             except:
                 logError("Fail to copy timestamp of %s" % sink)
 
@@ -216,8 +237,9 @@ def updateFile(cookie, sink, target):
             try:
                 if win32file:
                     filemode = win32file.GetFileAttributesW(target)
-                    win32file.SetFileAttributesW(target,
-                                                 filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+                    win32file.SetFileAttributesW(
+                        target,
+                        filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
                 else:
                     os.chmod(target, stat.S_IWUSR)
             except:
@@ -228,10 +250,11 @@ def updateFile(cookie, sink, target):
             if cookie.time:
                 try:
                     s = os.stat(sink)
-                    os.utime(target, (s.st_atime, s.st_mtime));
+                    os.utime(target, (s.st_atime, s.st_mtime))
                 except:
-                    logError(
-                        "Fail to copy timestamp of %s" % sink)  # The utime api of the 2.3 version of python is not unicode compliant.
+                    # The utime api of the 2.3 version of python is not unicode
+                    # compliant.
+                    logError("Fail to copy timestamp of %s" % sink)
         except:
             logError("Fail to override %s" % sink)
 
@@ -242,8 +265,8 @@ def updateFile(cookie, sink, target):
 def prepareRemoveFile(path):
     if win32file:
         filemode = win32file.GetFileAttributesW(path)
-        win32file.SetFileAttributesW(path,
-                                     filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+        win32file.SetFileAttributesW(path, filemode & ~win32file.FILE_ATTRIBUTE_READONLY &
+                                     ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
     else:
         os.chmod(path, stat.S_IWUSR)
 
@@ -305,7 +328,8 @@ def convertPath(path):
     if separator != "/":
         path = re.sub(re.escape(separator), "/", path)
 
-    # Help file, folder pattern to express that it should match the all file or folder name.
+    # Help file, folder pattern to express that it should match the all file
+    # or folder name.
     path = "/" + path
     return path
 
@@ -360,7 +384,7 @@ def convertPatterns(path, sign):
     """Read the files for pattern and return a vector of filters"""
     filters = []
     f = open(path, "r")
-    while 1:
+    while True:
         pattern = f.readline()
         if not pattern:
             break
@@ -428,8 +452,8 @@ def main(args):
             cookie.relative = 1
         elif o in ["-n", "--dry-run"]:
             cookie.dry_run = 1
-        elif o in ["-t", "--times",
-                   "--time"]:  # --time is there to guaranty backward compatibility with previous buggy version.
+        # --time is there to guaranty backward compatibility with previous buggy version.
+        elif o in ["-t", "--times", "--time"]:
             cookie.time = 1
         elif o in ["-u", "--update"]:
             cookie.update = 1
@@ -474,7 +498,7 @@ def main(args):
     target_root = args[1]
     try:  # In order to allow compatibility below 2.3.
         pass
-        if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+        if "supports_unicode_filenames" in os.path.__dict__ and os.path.supports_unicode_filenames:
             target_root = unicode(target_root, sys.getfilesystemencoding())
     finally:
         cookie.target_root = target_root
@@ -486,7 +510,7 @@ def main(args):
     sink_families = {}
     for sink in sinks:
         try:  # In order to allow compatibility below 2.3.
-            if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+            if "supports_unicode_filenames" in os.path.__dict__ and os.path.supports_unicode_filenames:
                 sink = unicode(sink, sys.getfilesystemencoding())
         except:
             pass
@@ -499,7 +523,7 @@ def main(args):
                 break
             sink_root, sink_name = os.path.split(sink_root)
         sink_root = sink_drive + sink_root
-        if not sink_families.has_key(sink_root):
+        if sink_root not in sink_families:
             sink_families[sink_root] = []
         sink_families[sink_root] = sink_families[sink_root] + [sink_name]
 
@@ -509,15 +533,28 @@ def main(args):
         else:
             cookie.sink_root = sink_root
 
-        global y  # In order to allow compatibility below 2.1 (nested scope where used before).
+        # In order to allow compatibility below 2.1 (nested scope where used
+        # before).
+        global y
         y = sink_root
-        files = filter(lambda x: os.path.isfile(os.path.join(y, x)), sink_families[sink_root])
+        files = filter(
+            lambda x: os.path.isfile(
+                os.path.join(
+                    y,
+                    x)),
+            sink_families[sink_root])
         if files:
             visit(cookie, sink_root, files)
 
-        # global y # In order to allow compatibility below 2.1 (nested scope where used before).
+        # global y # In order to allow compatibility below 2.1 (nested scope
+        # where used before).
         y = sink_root
-        folders = filter(lambda x: os.path.isdir(os.path.join(y, x)), sink_families[sink_root])
+        folders = filter(
+            lambda x: os.path.isdir(
+                os.path.join(
+                    y,
+                    x)),
+            sink_families[sink_root])
         for folder in folders:
             folder_path = os.path.join(sink_root, folder)
             if not cookie.recursive:
index 030bef5..96bdc91 100644 (file)
@@ -32,14 +32,16 @@ class Mysalt(object):
             cmds.execute("grep '^pillar_roots' \
                     /etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
         if self.pillar_path == "":
-            log.warning("pillar path not found, make sure the pillar_roots configed")
+            log.warning(
+                "pillar path not found, make sure the pillar_roots configed")
         else:
             os.system("mkdir -p " + self.pillar_path)
 
         self.state_path = str(cmds.execute("grep '^file_roots' \
             /etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
         if self.state_path == "":
-            log.warning("state path not found, make sure the file_roots configed")
+            log.warning(
+                "state path not found, make sure the file_roots configed")
         else:
             os.system("mkdir -p " + self.state_path)
 
@@ -72,7 +74,8 @@ class Mysalt(object):
         elif flag == "state":
             dst = self.state_path
         else:
-            log.error("this file or dir not pillar or state, can not support now.")
+            log.error(
+                "this file or dir not pillar or state, can not support now.")
             return False
 
         if self.IS_FILE == self.__is_dir_or_file(target):
@@ -125,20 +128,27 @@ class Mysalt(object):
                     num_s += 1
                 else:
                     num_f += 1
-                    msg = msg + self.__luxuriant_line("Failed %d:\n" % num_f, "red")
+                    msg = msg + \
+                        self.__luxuriant_line("Failed %d:\n" % num_f, "red")
                     msg = msg + "\t" + key + '\n'
-                    msg = msg + self.__luxuriant_line("\t%s\n" % ret[host][key]['comment'], "red")
-                    if True == ret[host][key]['changes'].has_key('retcode'):
-                        msg = msg + "RETCODE: %s\n" % (ret[host][key]['changes']['retcode'])
-                    if True == ret[host][key]['changes'].has_key('stderr'):
-                        msg = msg + "STDERR: %s\n" % (ret[host][key]['changes']['stderr'])
-                    if True == ret[host][key]['changes'].has_key('stdout'):
-                        msg = msg + "STDOUT: %s\n" % (ret[host][key]['changes']['stdout'])
-            msg = msg + self.__luxuriant_line("total success: %d\n" % num_s, "green")
+                    msg = msg + \
+                        self.__luxuriant_line("\t%s\n" % ret[host][key]['comment'], "red")
+                    if True == ('retcode' in ret[host][key]['changes']):
+                        msg = msg + \
+                            "RETCODE: %s\n" % (ret[host][key]['changes']['retcode'])
+                    if True == ('stderr' in ret[host][key]['changes']):
+                        msg = msg + \
+                            "STDERR: %s\n" % (ret[host][key]['changes']['stderr'])
+                    if True == ('stdout' in ret[host][key]['changes']):
+                        msg = msg + \
+                            "STDOUT: %s\n" % (ret[host][key]['changes']['stdout'])
+            msg = msg + \
+                self.__luxuriant_line("total success: %d\n" % num_s, "green")
             msg = msg + self.__luxuriant_line("failed: %d\n" % num_f, "red")
         except Exception as e:
-            log.error("sorry, thy to check result happend error, <%(e)s>.\nret:%(ret)s",
-                      {'e': e, 'ret': ret})
+            log.error(
+                "sorry, thy to check result happend error, <%(e)s>.\nret:%(ret)s", {
+                    'e': e, 'ret': ret})
             return -1
         log.info(':\n' + msg)
         return num_f
@@ -147,7 +157,9 @@ class Mysalt(object):
         try:
             log.info("salt " + host + " state.sls " +
                      fstate + ' pillar=\'' + str(ext_pillar) + '\'')
-            ret = self.salt.cmd(host, 'state.sls', [fstate, 'pillar=' + str(ext_pillar)], 180, 'list')
+            ret = self.salt.cmd(
+                host, 'state.sls', [
+                    fstate, 'pillar=' + str(ext_pillar)], 180, 'list')
         except Exception as e:
             log.error("try to init host %(host)s happend error: <%(e)s>.",
                       {'host': host, 'e': e})
@@ -170,7 +182,7 @@ class Mysalt(object):
         return ret
 
     def copy_by_state(self, host, src, state_cmd, **kwargs):
-        '''the src must be a dir, and the state.sls 
+        '''the src must be a dir, and the state.sls
         must be the name of the dir name'''
 
         if not self.slave_exists(host):
@@ -184,10 +196,12 @@ class Mysalt(object):
 
     def get_master_ip(self, host=None):
         if not host:
-            ret = cmds.execute("grep '^interface:' /etc/salt/master | awk '{print $2}'").strip()
+            ret = cmds.execute(
+                "grep '^interface:' /etc/salt/master | awk '{print $2}'").strip()
             return ret
         try:
-            ret = self.salt.cmd(host, "grains.item", ["master"])[host]['master']
+            ret = self.salt.cmd(host, "grains.item", ["master"])[
+                host]['master']
         except Exception:
             log.error("salt happened error when get master ip")
             return ""
index 7b85e08..5cf196d 100644 (file)
@@ -19,6 +19,7 @@ LOG = logging.getLogger(__name__)
 
 
 class SSHClientContext(paramiko.SSHClient):
+
     def __init__(self, ip, user, passwd, port=22):
         self.host = ip
         self.user = user
@@ -31,11 +32,20 @@ class SSHClientContext(paramiko.SSHClient):
         ret = stdout.channel.recv_exit_status()
         out = stdout.read().strip()
         err = stderr.read().strip()
-        LOG.info("in %s,%s,return:%s,output:%s:error:%s" % (self.host, cmd, ret, out, err))
+        LOG.info(
+            "in %s,%s,return:%s,output:%s:error:%s" %
+            (self.host, cmd, ret, out, err))
         return ret, out, err
 
     def connect(self):
-        super(SSHClientContext, self).connect(self.host, self.port, self.user, self.passwd, timeout=10)
+        super(
+            SSHClientContext,
+            self).connect(
+            self.host,
+            self.port,
+            self.user,
+            self.passwd,
+            timeout=10)
 
     def __enter__(self):
         self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -48,6 +58,7 @@ class SSHClientContext(paramiko.SSHClient):
 
 
 class SFTPClientContext(object):
+
     def __init__(self, ip, user, passwd, port=22):
         self.host = ip
         self.passwd = passwd
@@ -97,7 +108,9 @@ def upload_dir(host, user, passwd, local_dir, remote_dir):
     remote_dir = os.path.join(remote_dir, os.path.basename(local_dir))
     ret, _, _ = run_cmd(host, user, passwd, "sudo rm -rf %s" % remote_dir)
     if ret != 0 and ret != 1:
-        LOG.error("somehow failed in rm -rf %s on host:%s,return:%s" % (remote_dir, host, ret))
+        LOG.error(
+            "somehow failed in rm -rf %s on host:%s,return:%s" %
+            (remote_dir, host, ret))
         exit(1)
     with SFTPClientContext(host, user, passwd) as sftp:
         sftp.connect()
@@ -117,7 +130,7 @@ def upload_dir(host, user, passwd, local_dir, remote_dir):
                 try:
                     sftp.mkdir(remote_path)
                     LOG.info("mkdir path %s" % remote_path)
-                except Exception, e:
+                except Exception as e:
                     raise
     return remote_dir
 
@@ -177,7 +190,9 @@ def download_dir(host, user, passwd, remote_path, local_path):
                 dest_path = local_path
             else:
                 raise Exception('path:%s is not exists' % dir_name)
-    LOG.info("download_dir from host:%s:%s to dest:%s" % (host, remote_path, dest_path))
+    LOG.info(
+        "download_dir from host:%s:%s to dest:%s" %
+        (host, remote_path, dest_path))
     transport = paramiko.Transport((host, 22))
     transport.connect(username=user, password=passwd)
     sftp = paramiko.SFTPClient.from_transport(transport)
@@ -189,7 +204,8 @@ def download_dir(host, user, passwd, remote_path, local_path):
             path = q.get()
             st = sftp.lstat(path).st_mode
             relative_path = path[len(remote_path):]
-            if relative_path.startswith('/'): relative_path = relative_path[1:]
+            if relative_path.startswith('/'):
+                relative_path = relative_path[1:]
             local = os.path.join(dest_path, relative_path)
             if os.path.exists(local):
                 shutil.rmtree(local)
@@ -206,7 +222,9 @@ def download_dir(host, user, passwd, remote_path, local_path):
                     sftp.get(fullpath, dest)
                     os.chmod(dest, st)
     else:
-        raise Exception('path:%s:%s not exists or is not a dir' % (host, remote_path))
+        raise Exception(
+            'path:%s:%s not exists or is not a dir' %
+            (host, remote_path))
     return dest_path
 
 
@@ -218,6 +236,7 @@ def run_cmd(host, user, passwd, cmd):
 
 
 class SshFileTransfer(object):
+
     def __init__(self, ip, user, passwd):
         self.ip, self.user, self.passwd = ip, user, passwd
 
index 2a9a4c0..3fa23cd 100644 (file)
@@ -20,4 +20,4 @@ from vstf.common import cliutil as util
           help="a params of test-xx")
 def do_test_xx(args):
     """this is a help doc"""
-    print "run test01 " + args.test + args.xx
\ No newline at end of file
+    print "run test01 " + args.test + args.xx
index 97582c7..ac3c9b7 100644 (file)
@@ -14,49 +14,51 @@ from vstf.common import message
 
 
 class UdpServer(object):
+
     def __init__(self):
         super(UdpServer, self).__init__()
         try:
             os.unlink(constants.sockaddr)
         except OSError:
             if os.path.exists(constants.sockaddr):
-                raise Exception("socket not found %s" % constants.sockaddr)    
-        self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)    
-    
-    def listen(self,backlog=5):
+                raise Exception("socket not found %s" % constants.sockaddr)
+        self.conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+    def listen(self, backlog=5):
         self.conn.listen(backlog)
-        
+
     def accept(self):
         return self.conn.accept()
-    
+
     def bind(self, addr=constants.sockaddr):
         return self.conn.bind(addr)
-       
+
 #     def send(self, data, addr):
 #         return message.sendto(self.conn.sendto, data, addr)
-        
+
 #     def recv(self, size=constants.buff_size):
 #         return message.recv(self.conn.recvfrom)
-    
+
     def close(self):
         self.conn.close()
 
 
 class UdpClient(object):
+
     def __init__(self):
         super(UdpClient, self).__init__()
         if not os.path.exists(constants.sockaddr):
-            raise Exception("socket not found %s" % constants.sockaddr)    
-        self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-     
+            raise Exception("socket not found %s" % constants.sockaddr)
+        self.conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
     def connect(self, addr=constants.sockaddr):
         return self.conn.connect(addr)
-       
+
     def send(self, data):
         message.send(self.conn.send, data)
-        
+
     def recv(self):
         return message.recv(self.conn.recv)
-    
+
     def close(self):
-        self.conn.close()
\ No newline at end of file
+        self.conn.close()
index f2e1409..e9ee279 100644 (file)
@@ -82,7 +82,7 @@ def my_mkdir(filepath):
     try:
         LOG.info("mkdir -p %s" % filepath)
         os.makedirs(filepath)
-    except OSError, e:
+    except OSError as e:
         if e.errno == 17:
             LOG.info("! %s already exists" % filepath)
         else:
@@ -107,7 +107,9 @@ def check_and_kill(process):
 
 
 def list_mods():
-    return check_output("lsmod | sed 1,1d | awk '{print $1}'", shell=True).split()
+    return check_output(
+        "lsmod | sed 1,1d | awk '{print $1}'",
+        shell=True).split()
 
 
 def check_and_rmmod(mod):
@@ -144,6 +146,7 @@ def randomMAC():
 
 
 class IPCommandHelper(object):
+
     def __init__(self, ns=None):
         self.devices = []
         self.macs = []
@@ -174,7 +177,10 @@ class IPCommandHelper(object):
             cmd = "ip netns exec %s " % ns + cmd
         for device in self.devices:
             buf = check_output(cmd % device, shell=True)
-            bdfs = re.findall(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', buf, re.MULTILINE)
+            bdfs = re.findall(
+                r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$',
+                buf,
+                re.MULTILINE)
             if bdfs:
                 self.bdf_device_map[bdfs[0]] = device
                 self.device_bdf_map[device] = bdfs[0]
@@ -188,7 +194,9 @@ class IPCommandHelper(object):
         if ns:
             cmd = "ip netns exec %s " % ns + cmd
         buf = check_output(cmd, shell=True)
-        macs = re.compile(r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+        macs = re.compile(
+            r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}",
+            re.IGNORECASE | re.MULTILINE)
         for mac in macs.findall(buf):
             if mac.lower() not in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
                 return mac
index 896bb1d..ae4fecf 100644 (file)
@@ -12,6 +12,7 @@ import sys
 
 
 class VstfHelpFormatter(argparse.HelpFormatter):
+
     def start_section(self, heading):
         # Title-case the headings
         heading = '%s%s' % (heading[0].upper(), heading[1:])
@@ -19,6 +20,7 @@ class VstfHelpFormatter(argparse.HelpFormatter):
 
 
 class VstfParser(argparse.ArgumentParser):
+
     def __init__(self,
                  prog='vstf',
                  description="",
@@ -41,11 +43,12 @@ class VstfParser(argparse.ArgumentParser):
             desc = callback.__doc__ or ''
             action_help = desc.strip()
             arguments = getattr(callback, 'arguments', [])
-            subparser = subparsers.add_parser(command,
-                                              help=action_help,
-                                              description=desc,
-                                              add_help=False,
-                                              formatter_class=VstfHelpFormatter)
+            subparser = subparsers.add_parser(
+                command,
+                help=action_help,
+                description=desc,
+                add_help=False,
+                formatter_class=VstfHelpFormatter)
             subparser.add_argument('-h', '--help',
                                    action='help',
                                    help=argparse.SUPPRESS)
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index a37bf4c..02bf486 100644 (file)
@@ -45,10 +45,11 @@ cmd = CommandLine()
 
 
 class OpsChains(object):
+
     def __init__(self, monitor, port):
         """The ops chains will setup the proxy to rabbitmq
         and setup a thread to watch the queues of rabbitmq
-        
+
         """
         LOG.info("VSTF Manager start to listen to %s", monitor)
         if not os.path.exists(cst.VSTFCPATH):
@@ -63,7 +64,8 @@ class OpsChains(object):
         if not target:
             respond = "the target is empty, not support now."
         else:
-            respond = self.chanl.call(self.chanl.make_msg("list_nic_devices"), target)
+            respond = self.chanl.call(
+                self.chanl.make_msg("list_nic_devices"), target)
         return respond
 
     def src_install(self, host, config_file):
@@ -118,8 +120,8 @@ class OpsChains(object):
         return Fabricant(host, self.chanl).affctl_list()
 
     def _create_task(self, scenario):
-        taskid = self.dbconn.create_task(str(uuid.uuid4()), time.strftime(cst.TIME_FORMAT),
-                                         desc=scenario + "Test")
+        taskid = self.dbconn.create_task(str(uuid.uuid4()), time.strftime(
+            cst.TIME_FORMAT), desc=scenario + "Test")
         LOG.info("new Task id:%s" % taskid)
         if -1 == taskid:
             raise Exception("DB create task failed.")
@@ -142,7 +144,8 @@ class OpsChains(object):
 
             LOG.info(nic_info)
 
-            os_info, cpu_info, mem_info, hw_info = self.collection.collect_host_info(host["agent"])
+            os_info, cpu_info, mem_info, hw_info = self.collection.collect_host_info(host[
+                                                                                     "agent"])
             LOG.info(os_info)
             LOG.info(cpu_info)
             LOG.info(mem_info)
@@ -165,11 +168,11 @@ class OpsChains(object):
         forward_settings = ForwardingSettings()
         head_d = {
             "ip": head,
-            "namespace":forward_settings.settings["head"]["namespace"]
+            "namespace": forward_settings.settings["head"]["namespace"]
         }
         tail_d = {
             "ip": tail,
-            "namespace":forward_settings.settings["tail"]["namespace"]
+            "namespace": forward_settings.settings["tail"]["namespace"]
         }
         LOG.info(head_d)
         LOG.info(tail_d)
@@ -184,10 +187,19 @@ class OpsChains(object):
         info_str = "do report over"
         return info_str
 
-    def run_perf_cmd(self, case, rpath='./', affctl=False, build_on=False, save_on=False, report_on=False,
-                     mail_on=False):
+    def run_perf_cmd(
+            self,
+            case,
+            rpath='./',
+            affctl=False,
+            build_on=False,
+            save_on=False,
+            report_on=False,
+            mail_on=False):
         LOG.info(case)
-        LOG.info("build_on:%s report_on:%s mail_on:%s" % (build_on, report_on, mail_on))
+        LOG.info(
+            "build_on:%s report_on:%s mail_on:%s" %
+            (build_on, report_on, mail_on))
         casetag = case['case']
         tool = case['tool']
         protocol = case['protocol']
@@ -216,7 +228,10 @@ class OpsChains(object):
         tool_settings = ToolSettings()
         tester_settings = TesterSettings()
         flow_producer = FlowsProducer(self.chanl, flows_settings)
-        provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+        provider = PerfProvider(
+            flows_settings.settings,
+            tool_settings.settings,
+            tester_settings.settings)
 
         perf = pf.Performance(self.chanl, provider)
         flow_producer.create(scenario, casetag)
@@ -225,20 +240,29 @@ class OpsChains(object):
         LOG.info(result)
         if save_on:
             taskid = self._create_task(scenario)
-            testid = self.dbconn.add_test_2task(taskid, casetag, protocol, ttype, switch, provider, tool)
+            testid = self.dbconn.add_test_2task(
+                taskid, casetag, protocol, ttype, switch, provider, tool)
             LOG.info(testid)
             self.dbconn.add_data_2test(testid, result)
             if report_on:
                 self.report(rpath, not mail_on, taskid)
         return result
 
-    def run_perf_file(self, rpath='./', affctl=False, report_on=True, mail_on=True):
+    def run_perf_file(
+            self,
+            rpath='./',
+            affctl=False,
+            report_on=True,
+            mail_on=True):
         perf_settings = PerfSettings()
         flows_settings = FlowsSettings()
         tool_settings = ToolSettings()
         tester_settings = TesterSettings()
         flow_producer = FlowsProducer(self.chanl, flows_settings)
-        provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+        provider = PerfProvider(
+            flows_settings.settings,
+            tool_settings.settings,
+            tester_settings.settings)
         perf = pf.Performance(self.chanl, provider)
         tests = perf_settings.settings
 
@@ -274,7 +298,8 @@ class OpsChains(object):
                 result = perf.run(tool, protocol, ttype, sizes, affctl)
                 LOG.info(result)
 
-                testid = self.dbconn.add_test_2task(taskid, casetag, protocol, ttype, switch, provider, tool)
+                testid = self.dbconn.add_test_2task(
+                    taskid, casetag, protocol, ttype, switch, provider, tool)
                 LOG.info(testid)
 
                 self.dbconn.add_data_2test(testid, result)
@@ -293,6 +318,7 @@ class OpsChains(object):
 
 
 class Manager(daemon.Daemon):
+
     def __init__(self):
         """
         The manager will create a socket for vstfadm.
@@ -356,13 +382,16 @@ class Manager(daemon.Daemon):
                     self.daemon_die()
                     raise e
                 except Exception as e:
-                    # here just the function failed no need exit, just return the msg
+                    # here just the function failed no need exit, just return
+                    # the msg
                     msg = "Run function failed. [ %s ]" % (e)
                     response = msg
                     LOG.error(msg)
                 try:
                     response = message.add_context(response, **context)
-                    LOG.debug("Manager send the response: <%(r)s", {'r': response})
+                    LOG.debug(
+                        "Manager send the response: <%(r)s", {
+                            'r': response})
                     message.send(conn.send, message.encode(response))
                 except Exception as e:
                     self.daemon_die()
@@ -374,7 +403,8 @@ class Manager(daemon.Daemon):
         """overwrite daemon.Daemon.daemon_die(self)"""
         LOG.info("manage catch the signal %s to exit." % signum)
         if self.conn:
-            # we can not close the conn direct, just tell manager to stop accept
+            # we can not close the conn direct, just tell manager to stop
+            # accept
             self.run_flag = False
 
         if self.ops:
@@ -418,8 +448,13 @@ def do_stop(args):
 
 def main():
     """this is for vstfctl"""
-    setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-manager.log", clevel=logging.INFO)
-    parser = VstfParser(prog="vstf-manager", description="vstf manager command line")
+    setup_logging(
+        level=logging.INFO,
+        log_file="/var/log/vstf/vstf-manager.log",
+        clevel=logging.INFO)
+    parser = VstfParser(
+        prog="vstf-manager",
+        description="vstf manager command line")
     parser.set_subcommand_parser(target=sys.modules[__name__])
     args = parser.parse_args()
     args.func(args)
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 410e1ee..a2aad9e 100644 (file)
@@ -33,6 +33,7 @@ def after_cursor_execute(conn, cursor, statement,
 
 
 class DbManage(object):
+
     def __init__(self, db_name=const.DBPATH):
         db_exists = os.path.exists(db_name)
         try:
@@ -165,9 +166,18 @@ class DbManage(object):
         else:
             return 0
 
-    def add_test_2task(self, task, case, protocol, typ, switch, provider, tool):
+    def add_test_2task(
+            self,
+            task,
+            case,
+            protocol,
+            typ,
+            switch,
+            provider,
+            tool):
         try:
         try:
-            item = table.TblTestList(task, case, protocol, typ, switch, provider, tool)
+            item = table.TblTestList(
+                task, case, protocol, typ, switch, provider, tool)
             self._session.add(item)
             self._session.commit()
         except Exception:
@@ -236,7 +246,8 @@ class DbManage(object):
         ret = self._session.query(table.TblTaskList)
         if ret:
             for tmp in ret.all():
-                result.append([tmp.TaskID, tmp.TaskName, tmp.Date, tmp.EXTInfo])
+                result.append(
+                    [tmp.TaskID, tmp.TaskName, tmp.Date, tmp.EXTInfo])
         return result
 
     def query_all_task_id(self):
@@ -255,7 +266,9 @@ class DbManage(object):
         return query.all()
 
     def query_scenario(self, casetag):
-        query = self._session.query(table.TblCaseInfo.ScenarioName).filter(table.TblCaseInfo.CaseTag == casetag)
+        query = self._session.query(
+            table.TblCaseInfo.ScenarioName).filter(
+            table.TblCaseInfo.CaseTag == casetag)
         ret = ""
         if query and query.first():
             ret = query.first()[0]
@@ -282,10 +295,13 @@ class DbManage(object):
     # Single TblTestList API
 
     def query_caselist(self, taskid, scenario):
-        query = self._session.query(table.TblTestList.CaseTag).filter(and_(
-            table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
-            table.TblCaseInfo.ScenarioName == scenario,
-            table.TblTestList.TaskID == taskid)).group_by(table.TblCaseInfo.CaseTag)
+        query = self._session.query(
+            table.TblTestList.CaseTag).filter(
+            and_(
+                table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
+                table.TblCaseInfo.ScenarioName == scenario,
+                table.TblTestList.TaskID == taskid)).group_by(
+                table.TblCaseInfo.CaseTag)
         return query.all()
 
     def query_testlist(self, taskid, scenario):
@@ -308,65 +324,85 @@ class DbManage(object):
         return query.all()
 
     def query_casetools(self, taskid, casetag):
-        query = self._session.query(table.TblTestList.Tools).filter(and_(
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.TaskID == taskid)).group_by(table.TblTestList.Tools)
+        query = self._session.query(
+            table.TblTestList.Tools).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.TaskID == taskid)).group_by(
+                table.TblTestList.Tools)
         return query.all()
 
     def query_scenariolist(self, taskid):
-        query = self._session.query(table.TblCaseInfo.ScenarioName).filter(and_(
-            table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
-            table.TblTestList.TaskID == taskid)).group_by(table.TblCaseInfo.ScenarioName)
+        query = self._session.query(
+            table.TblCaseInfo.ScenarioName).filter(
+            and_(
+                table.TblTestList.CaseTag == table.TblCaseInfo.CaseTag,
+                table.TblTestList.TaskID == taskid)).group_by(
+                table.TblCaseInfo.ScenarioName)
         return query.all()
 
     def query_throughput_load(self, taskid, casetag, provider):
         ptype = 'throughput'
-        query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.OfferedLoad).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput.AvgFrameSize,
+            table.TblThroughput.OfferedLoad).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_throughput_bandwidth(self, taskid, casetag, provider):
         ptype = 'throughput'
-        query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.Bandwidth).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput.AvgFrameSize,
+            table.TblThroughput.Bandwidth).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_throughput_table(self, taskid, casetag, provider):
         ptype = 'throughput'
-        query = self._session.query(table.TblThroughput.AvgFrameSize,
-                                    table.TblThroughput.Bandwidth,
-                                    table.TblThroughput.OfferedLoad,
-                                    table.TblThroughput.CPU,
-                                    table.TblThroughput.MppspGhz,
-                                    table.TblThroughput.MinimumLatency,
-                                    table.TblThroughput.MaximumLatency,
-                                    table.TblThroughput.AverageLatency,
-                                    ).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput.AvgFrameSize,
+            table.TblThroughput.Bandwidth,
+            table.TblThroughput.OfferedLoad,
+            table.TblThroughput.CPU,
+            table.TblThroughput.MppspGhz,
+            table.TblThroughput.MinimumLatency,
+            table.TblThroughput.MaximumLatency,
+            table.TblThroughput.AverageLatency,
+        ).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_throughput_simpletable(self, taskid, casetag, provider):
         ptype = 'throughput'
-        query = self._session.query(table.TblThroughput.AvgFrameSize,
-                                    table.TblThroughput.Bandwidth,
-                                    table.TblThroughput.OfferedLoad,
-                                    table.TblThroughput.CPU,
-                                    table.TblThroughput.MppspGhz,
-                                    table.TblThroughput.AverageLatency,
-                                    ).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput.AvgFrameSize,
+            table.TblThroughput.Bandwidth,
+            table.TblThroughput.OfferedLoad,
+            table.TblThroughput.CPU,
+            table.TblThroughput.MppspGhz,
+            table.TblThroughput.AverageLatency,
+        ).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_testdata(self, testid, ptype):
@@ -376,79 +412,103 @@ class DbManage(object):
 
     def query_throughput_avg(self, taskid, casetag, provider):
         ptype = 'throughput'
-        query = self._session.query(table.TblThroughput.AvgFrameSize, table.TblThroughput.AverageLatency).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput.AvgFrameSize,
+            table.TblThroughput.AverageLatency).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_frameloss_bandwidth(self, taskid, casetag, provider):
         ptype = 'frameloss'
-        query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.Bandwidth).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss.AvgFrameSize,
+            table.TblFrameloss.Bandwidth).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_frameloss_load(self, taskid, casetag, provider):
         ptype = 'frameloss'
-        query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.OfferedLoad).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss.AvgFrameSize,
+            table.TblFrameloss.OfferedLoad).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_frameloss_table(self, taskid, casetag, provider):
         ptype = 'frameloss'
-        query = self._session.query(table.TblFrameloss.AvgFrameSize,
-                                    table.TblFrameloss.Bandwidth,
-                                    table.TblFrameloss.OfferedLoad,
-                                    table.TblFrameloss.CPU,
-                                    table.TblFrameloss.MppspGhz,
-                                    table.TblFrameloss.MinimumLatency,
-                                    table.TblFrameloss.MaximumLatency,
-                                    table.TblFrameloss.AverageLatency
-                                    ).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss.AvgFrameSize,
+            table.TblFrameloss.Bandwidth,
+            table.TblFrameloss.OfferedLoad,
+            table.TblFrameloss.CPU,
+            table.TblFrameloss.MppspGhz,
+            table.TblFrameloss.MinimumLatency,
+            table.TblFrameloss.MaximumLatency,
+            table.TblFrameloss.AverageLatency).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_frameloss_simpletable(self, taskid, casetag, provider):
         ptype = 'frameloss'
-        query = self._session.query(table.TblFrameloss.AvgFrameSize,
-                                    table.TblFrameloss.Bandwidth,
-                                    table.TblFrameloss.OfferedLoad,
-                                    table.TblFrameloss.CPU,
-                                    table.TblFrameloss.MppspGhz,
-                                    table.TblFrameloss.AverageLatency
-                                    ).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss.AvgFrameSize,
+            table.TblFrameloss.Bandwidth,
+            table.TblFrameloss.OfferedLoad,
+            table.TblFrameloss.CPU,
+            table.TblFrameloss.MppspGhz,
+            table.TblFrameloss.AverageLatency).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_frameloss_avg(self, taskid, casetag, provider):
         ptype = 'frameloss'
-        query = self._session.query(table.TblFrameloss.AvgFrameSize, table.TblFrameloss.AverageLatency).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss.AvgFrameSize,
+            table.TblFrameloss.AverageLatency).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_latency_avg(self, taskid, casetag, provider):
         ptype = 'latency'
-        query = self._session.query(table.TblLatency.AvgFrameSize, table.TblLatency.AverageLatency).filter(and_(
-            table.TblTestList.TaskID == taskid,
-            table.TblTestList.CaseTag == casetag,
-            table.TblTestList.Provider == provider, table.TblTestList.Type == ptype,
-            table.TblTestList.TestID == table.TblLatency.TestID))
+        query = self._session.query(
+            table.TblLatency.AvgFrameSize,
+            table.TblLatency.AverageLatency).filter(
+            and_(
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TestID == table.TblLatency.TestID))
         return query.all()
 
     def query_summary_table(self, taskid, casetag, provider, ptype):
@@ -482,51 +542,71 @@ class DbManage(object):
         return []
 
     def query_throughput_provider(self, taskid, casetag, provider):
-        query = self._session.query(table.TblThroughput).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                     table.TblTestList.Provider == provider,
-                                                                     table.TblTestList.TaskID == taskid,
-                                                                     table.TblTestList.TestID == table.TblThroughput.TestID))
+        query = self._session.query(
+            table.TblThroughput).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.TestID == table.TblThroughput.TestID))
         return query.all()
 
     def query_frameloss_provider(self, taskid, casetag, provider):
-        query = self._session.query(table.TblFrameloss).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                    table.TblTestList.Provider == provider,
-                                                                    table.TblTestList.TaskID == taskid,
-                                                                    table.TblTestList.TestID == table.TblFrameloss.TestID))
+        query = self._session.query(
+            table.TblFrameloss).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.TestID == table.TblFrameloss.TestID))
         return query.all()
 
     def query_latency_provider(self, taskid, casetag, provider):
-        query = self._session.query(table.TblLatency).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                  table.TblTestList.Provider == provider,
-                                                                  table.TblTestList.TaskID == taskid,
-                                                                  table.TblTestList.TestID == table.TblLatency.TestID))
+        query = self._session.query(
+            table.TblLatency).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.TaskID == taskid,
+                table.TblTestList.TestID == table.TblLatency.TestID))
         return query.all()
 
     def query_case_type_count(self, taskid, casetag, ptype):
-        query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                   table.TblTestList.Type == ptype,
-                                                                   table.TblTestList.TaskID == taskid))
+        query = self._session.query(
+            table.TblTestList).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.TaskID == taskid))
 
         return query.count()
 
     def query_case_provider_count(self, taskid, casetag, provider):
-        query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                   table.TblTestList.Provider == provider,
-                                                                   table.TblTestList.TaskID == taskid))
+        query = self._session.query(
+            table.TblTestList).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.TaskID == taskid))
         return query.count()
 
     def query_case_type_provider_count(self, taskid, casetag, provider, ptype):
-        query = self._session.query(table.TblTestList).filter(and_(table.TblTestList.CaseTag == casetag,
-                                                                   table.TblTestList.Type == ptype,
-                                                                   table.TblTestList.Provider == provider,
-                                                                   table.TblTestList.TaskID == taskid))
+        query = self._session.query(
+            table.TblTestList).filter(
+            and_(
+                table.TblTestList.CaseTag == casetag,
+                table.TblTestList.Type == ptype,
+                table.TblTestList.Provider == provider,
+                table.TblTestList.TaskID == taskid))
 
         return query.count()
 
     def query_exten_info(self, taskid):
-        query = self._session.query(table.TblEXTInfo.EXTName,
-                                    table.TblEXTInfo.EXTContent,
-                                    table.TblEXTInfo.Description).filter(table.TblEXTInfo.TaskID == taskid)
+        query = self._session.query(
+            table.TblEXTInfo.EXTName,
+            table.TblEXTInfo.EXTContent,
+            table.TblEXTInfo.Description).filter(
+            table.TblEXTInfo.TaskID == taskid)
         return query.all()
 
 
@@ -534,12 +614,27 @@ def unit_test():
     import time
     dbase = DbManage()
 
-    taskid = dbase.create_task("test", str(time.ctime()), "this is a unit test")
-    dbase.add_host_2task(taskid, "hosta", "hw82576", "xxx", "x", "82599", "ubuntu")
+    taskid = dbase.create_task("test", str(
+        time.ctime()), "this is a unit test")
+    dbase.add_host_2task(
+        taskid,
+        "hosta",
+        "hw82576",
+        "xxx",
+        "x",
+        "82599",
+        "ubuntu")
     dbase.add_extent_2task(taskid, "CETH", "driver", "version 2.0")
     dbase.add_extent_2task(taskid, "EVS", "switch", "version 3.0")
 
-    testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "throughput", "ovs", None, "netperf")
+    testid = dbase.add_test_2task(
+        taskid,
+        "Tn-1",
+        'udp',
+        "throughput",
+        "ovs",
+        None,
+        "netperf")
     data = {
         '64': {
             'OfferedLoad': 2,
@@ -557,7 +652,14 @@ def unit_test():
     }
     dbase.add_data_2test(testid, data)
 
-    testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "frameloss", "ovs", None, "netperf")
+    testid = dbase.add_test_2task(
+        taskid,
+        "Tn-1",
+        'udp',
+        "frameloss",
+        "ovs",
+        None,
+        "netperf")
     data = {
         '64': {
             'OfferedLoad': 2,
@@ -575,13 +677,35 @@ def unit_test():
     }
     dbase.add_data_2test(testid, data)
 
-    testid = dbase.add_test_2task(taskid, "Tn-1", 'udp', "latency", "ovs", None, "netperf")
+    testid = dbase.add_test_2task(
+        taskid,
+        "Tn-1",
+        'udp',
+        "latency",
+        "ovs",
+        None,
+        "netperf")
     data = {
     data = {
-        64: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
-        128: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
-        512: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0},
-        1024: {'MaximumLatency': 0.0, 'AverageLatency': 0.0, 'MinimumLatency': 0.0, 'OfferedLoad': 0.0}
-    }
+        64: {
+            'MaximumLatency': 0.0,
+            'AverageLatency': 0.0,
+            'MinimumLatency': 0.0,
+            'OfferedLoad': 0.0},
+        128: {
+            'MaximumLatency': 0.0,
+            'AverageLatency': 0.0,
+            'MinimumLatency': 0.0,
+            'OfferedLoad': 0.0},
+        512: {
+            'MaximumLatency': 0.0,
+            'AverageLatency': 0.0,
+            'MinimumLatency': 0.0,
+            'OfferedLoad': 0.0},
+        1024: {
+            'MaximumLatency': 0.0,
+            'AverageLatency': 0.0,
+            'MinimumLatency': 0.0,
+            'OfferedLoad': 0.0}}
     dbase.add_data_2test(testid, data)
     query = dbase.query_testlist(1, "Tn")
     for item in query:
index 55b02e5..92f857a 100644 (file)
@@ -52,7 +52,7 @@ class TblCaseInfo(Base):
                  ScenarioName, FigurePath, Direction, Directiontag,
                  Configure, Description, **kwargs):
         """
-        :param CaseID: 
+        :param CaseID:
         :param CaseTag: ??
         :param CaseName: name of case, like tester-vm
         :param ScenarioName: name of scenario, like Tn
@@ -135,7 +135,16 @@ class TblTestList(Base):
     Provider = Column(String(const.PROVIDER_LEN))
     Tools = Column(String(const.TOOLS_LEN))
 
-    def __init__(self, taskid, casetag, protocol, typ, switch, provider, tools, **kwargs):
+    def __init__(
+            self,
+            taskid,
+            casetag,
+            protocol,
+            typ,
+            switch,
+            provider,
+            tools,
+            **kwargs):
         """Table of test"""
         self.TaskID = taskid
         self.CaseTag = casetag
index b536e3b..acc88d9 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class IntentParser(object):
+
     def __init__(self, cfg_file):
         self.cfg_file = cfg_file
         with file(cfg_file) as fp:
@@ -59,7 +60,9 @@ class IntentParser(object):
                 for tap_cfg in vm_cfg['taps']:
                     br_type_set.add(tap_cfg["br_type"])
             if len(br_type_set) > 1:
-                raise Exception("specified more than one type of vswitchfor host:%s" % host_cfg['ip'])
+                raise Exception(
+                    "specified more than one type of vswitchfor host:%s" %
+                    host_cfg['ip'])
             if len(br_type_set) > 0:
                 br_type = br_type_set.pop()
                 host_cfg['br_type'] = br_type
index 1d201b7..40e25e9 100644 (file)
@@ -17,6 +17,7 @@ LOG = logging.getLogger(__name__)
 
 
 class EnvBuildApi(object):
+
     def __init__(self, conn, config_file):
         LOG.info("welcome to EnvBuilder")
         self.conn = conn
@@ -48,6 +49,7 @@ class EnvBuildApi(object):
 
 
 class TransmitterBuild(object):
+
     def __init__(self, conn, config_file):
         LOG.info("welcome to TransmitterBuild")
         self.conn = conn
@@ -72,7 +74,9 @@ if __name__ == "__main__":
     import argparse
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('--rpc_server', help='rabbitmq server for deliver messages.')
+    parser.add_argument(
+        '--rpc_server',
+        help='rabbitmq server for deliver messages.')
     parser.add_argument('--config', help='config file to parse')
     args = parser.parse_args()
     logging.basicConfig(level=logging.INFO)
index 6e32a05..7861ad3 100644 (file)
@@ -11,6 +11,7 @@ from vstf.rpc_frame_work import rpc_producer
 
 
 class EnvCollectApi(object):
+
     def __init__(self, rb_mq_server):
         """
         When use collect, a connection of rabbitmq is needed.
index 3b1c082..3f6978e 100644 (file)
@@ -12,6 +12,7 @@ import vstf.common.constants as cst
 
 
 class Fabricant(object):
+
     def __init__(self, target, conn):
         self.conn = conn
         self.target = target
@@ -21,7 +22,12 @@ class Fabricant(object):
 
     @property
     def declare_commands(self):
-        driver = {"install_drivers", "clean_drivers", "autoneg_on", "autoneg_off", "autoneg_query"}
+        driver = {
+            "install_drivers",
+            "clean_drivers",
+            "autoneg_on",
+            "autoneg_off",
+            "autoneg_query"}
 
         builder = {"build_env", "clean_env"}
 
@@ -29,7 +35,10 @@ class Fabricant(object):
 
         perf = {"perf_run", "run_vnstat", "kill_vnstat", "force_clean"}
 
-        device_mgr = {"get_device_detail", "list_nic_devices", "get_device_verbose"}
+        device_mgr = {
+            "get_device_detail",
+            "list_nic_devices",
+            "get_device_verbose"}
 
         netns = {"clean_all_namespace", "config_dev", "recover_dev", "ping"}
 
@@ -37,11 +46,22 @@ class Fabricant(object):
 
         cmdline = {"execute"}
 
-        spirent = {"send_packet", "stop_flow", "mac_learning", "run_rfc2544suite", "run_rfc2544_throughput",
-                   "run_rfc2544_frameloss", "run_rfc2544_latency"}
-
-        equalizer = {"get_numa_core", "get_nic_numa", "get_nic_interrupt_proc", "get_vm_info", "bind_cpu",
-                     "catch_thread_info"}
+        spirent = {
+            "send_packet",
+            "stop_flow",
+            "mac_learning",
+            "run_rfc2544suite",
+            "run_rfc2544_throughput",
+            "run_rfc2544_frameloss",
+            "run_rfc2544_latency"}
+
+        equalizer = {
+            "get_numa_core",
+            "get_nic_numa",
+            "get_nic_interrupt_proc",
+            "get_vm_info",
+            "bind_cpu",
+            "catch_thread_info"}
 
         return driver | cpu | builder | perf | device_mgr | netns | cmdline | collect | spirent | equalizer
 
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 6792ad9..c217f9e 100644 (file)
@@ -21,6 +21,7 @@ PASSWD = None
 
 
 class Mail(object):
+
     def __init__(self, srv=SRV, user=USER, passwd=PASSWD):
         self.srv = srv
         self.user = USER
@@ -81,7 +82,10 @@ class Mail(object):
     def attach_files(self, files):
         for _file in files:
             part = MIMEApplication(open(_file, "rb").read())
-            part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(_file))
+            part.add_header(
+                'Content-Disposition',
+                'attachment',
+                filename=os.path.basename(_file))
             self._msg.attach(part)
 
     def send(self):
@@ -114,11 +118,11 @@ if __name__ == "__main__":
         <head>
         <title>vstf</title>
         </head>
-        
+
         <body>
             hello vstf
         </body>
-        
+
         </html>
     """
     m.attach_text(context, m.HTML)
index a4d7bb0..42f991a 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class SendMail(object):
+
     def __init__(self, mail_info):
         self._mail_info = mail_info
 
@@ -32,7 +33,9 @@ class SendMail(object):
 
         if 'attach' in self._mail_info['body']:
             send.attach_files(self._mail_info['body']['attach'])
-        send.attach_text(self._mail_info['body']['content'], self._mail_info['body']['subtype'])
+        send.attach_text(
+            self._mail_info['body']['content'],
+            self._mail_info['body']['subtype'])
         send.attach_title(self._mail_info['body']['subject'])
         send.send()
 
@@ -50,11 +53,11 @@ def unit_test():
         <head>
         <title>vstf</title>
         </head>
-        
+
         <body>
             hello vstf
         </body>
-        
+
         </html>
     """
     mail_settings.set_subtype('html')
index 547db68..83b8d15 100644 (file)
@@ -6,5 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-
index ea29655..a3285c9 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class CandyGenerator(object):
+
     def __init__(self, task):
         self._task = task
 
@@ -99,7 +100,8 @@ class CandyGenerator(object):
                     "data": scenario_data.get_latency_bardata(case)
                 }
                 table = scenario_data.get_latency_tabledata(case)
-            test_section = self.create_test(sectionid, params_info, table, draw)
+            test_section = self.create_test(
+                sectionid, params_info, table, draw)
             scenario_chapter[name] = test_section
 
         return scenario_chapter
@@ -125,7 +127,10 @@ class CandyGenerator(object):
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-candy.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-candy.log",
+        clevel=logging.INFO)
 
     dbase = DbManage()
     taskid = dbase.get_last_taskid()
@@ -135,4 +140,3 @@ def main():
     creator.create("Tn")
 if __name__ == '__main__':
     main()
-
index f9fc69d..ded94eb 100644 (file)
@@ -12,12 +12,14 @@ import vstf.common.constants as cst
 
 
 class DataProvider(object):
+
     def __init__(self, taskid, dbase):
         self._dbase = dbase
         self._taskid = taskid
 
 
 class CommonData(DataProvider):
+
     def get_taskname(self):
         return self._dbase.query_taskname(self._taskid)
 
@@ -67,6 +69,7 @@ class CommonData(DataProvider):
 
 
 class ScenarioData(DataProvider):
+
     def __init__(self, taskid, dbase, scenario):
         print "ScenarioData in"
         DataProvider.__init__(self, taskid, dbase)
@@ -96,13 +99,15 @@ class ScenarioData(DataProvider):
         return query
 
     def is_provider_start(self, case, provider):
-        count = self._dbase.query_case_provider_count(self._taskid, case, provider)
+        count = self._dbase.query_case_provider_count(
+            self._taskid, case, provider)
         if count:
             return True
         return False
 
     def is_type_provider_start(self, case, provider, ptype):
-        count = self._dbase.query_case_type_provider_count(self._taskid, case, provider, ptype)
+        count = self._dbase.query_case_type_provider_count(
+            self._taskid, case, provider, ptype)
         if count:
             return True
         return False
@@ -133,7 +138,12 @@ class ScenarioData(DataProvider):
         test_type = "frameloss"
         return self.get_summary_tabledata(case, provider, test_type)
 
-    def get_summary_tabledata(self, case, provider, test_type, table_type='pdf'):
+    def get_summary_tabledata(
+            self,
+            case,
+            provider,
+            test_type,
+            table_type='pdf'):
         table_head = []
         table_body = []
         type_title = {
@@ -142,41 +152,77 @@ class ScenarioData(DataProvider):
         }
         tools = self.get_test_tools(case)
         if "spirent" in tools:
-            table_body = self._dbase.query_summary_table(self._taskid, case, provider, test_type)
+            table_body = self._dbase.query_summary_table(
+                self._taskid, case, provider, test_type)
             if 'pdf' == table_type:
-                table_head = [
-                    ["FrameSize (byte)", test_type, "", "", "", "Latency(uSec)", "", ""],
-                    ["", "    Mpps    ", "   " + type_title[test_type] + " (%)   ", "CPU Used (%)", " Mpps/Ghz ",
-                     " Min ", " Max ", " Avg "]
-                ]
+                table_head = [["FrameSize (byte)",
+                               test_type,
+                               "",
+                               "",
+                               "",
+                               "Latency(uSec)",
+                               "",
+                               ""],
+                              ["",
+                               "    Mpps    ",
+                               "   " + type_title[test_type] + " (%)   ",
+                               "CPU Used (%)",
+                               " Mpps/Ghz ",
+                               " Min ",
+                               " Max ",
+                               " Avg "]]
             else:
-                table_head = [
-                    ["FrameSize (byte)", "    Mpps    ", "   " + type_title[test_type] + " (%)   ", "CPU Used (%)",
-                     " Mpps/Ghz ", "MinLatency(uSec)", "MaxLatency(uSec)", "AvgLatency(uSec)"],
-                ]
+                table_head = [["FrameSize (byte)",
+                               "    Mpps    ",
+                               "   " + type_title[test_type] + " (%)   ",
+                               "CPU Used (%)",
+                               " Mpps/Ghz ",
+                               "MinLatency(uSec)",
+                               "MaxLatency(uSec)",
+                               "AvgLatency(uSec)"],
+                              ]
         else:
-            table_body = self._dbase.query_summary_simpletable(self._taskid, case, provider, test_type)
+            table_body = self._dbase.query_summary_simpletable(
+                self._taskid, case, provider, test_type)
             if 'pdf' == table_type:
-                table_head = [
-                    ["FrameSize (byte)", test_type, "", "", "", "Latency(uSec)"],
-                    ["", "    Mpps    ", "   " + type_title[test_type] + " (%)", "CPU Used (%)", " Mpps/Ghz ",
-                     "  Avg  "]
-                ]
+                table_head = [["FrameSize (byte)",
+                               test_type,
+                               "",
+                               "",
+                               "",
+                               "Latency(uSec)"],
+                              ["",
+                               "    Mpps    ",
+                               "   " + type_title[test_type] + " (%)",
+                               "CPU Used (%)",
+                               " Mpps/Ghz ",
+                               "  Avg  "]]
             else:
-                table_head = [
-                    ["FrameSize (byte)", "    Mpps    ", "   " + type_title[test_type] + " (%)   ", "CPU Used (%)",
-                     " Mpps/Ghz ", "AvgLatency(uSec)"],
-                ]
+                table_head = [["FrameSize (byte)",
+                               "    Mpps    ",
+                               "   " + type_title[test_type] + " (%)   ",
+                               "CPU Used (%)",
+                               " Mpps/Ghz ",
+                               "AvgLatency(uSec)"],
+                              ]
         return table_head + table_body
 
     def get_ratedata(self, testid, test_type):
-        table_head = [
-            ["FrameSize (bytes)", "Bandwidth(Mpps)", "Load (%)", "CPU Usage(%)", "Mpps/Ghz", "AvgLatency(uSec)"],
-        ]
+        table_head = [["FrameSize (bytes)",
+                       "Bandwidth(Mpps)",
+                       "Load (%)",
+                       "CPU Usage(%)",
+                       "Mpps/Ghz",
+                       "AvgLatency(uSec)"],
+                      ]
         query = self._dbase.query_testdata(testid, test_type)
         table_body = []
         for item in query:
-            table_body.append([item.AvgFrameSize, item.Bandwidth, item.OfferedLoad, item.CPU, item.MppspGhz,
+            table_body.append([item.AvgFrameSize,
+                               item.Bandwidth,
+                               item.OfferedLoad,
+                               item.CPU,
+                               item.MppspGhz,
                                item.AverageLatency])
         result = []
         if table_body:
@@ -203,20 +249,29 @@ class ScenarioData(DataProvider):
         for provider in cst.PROVIDERS:
             if self.is_provider_start(case, provider):
                 if item == 'Percent':
-                    query = self._dbase.query_load(self._taskid, case, provider, test_type)
+                    query = self._dbase.query_load(
+                        self._taskid, case, provider, test_type)
                 elif item == 'Mpps':
-                    query = self._dbase.query_bandwidth(self._taskid, case, provider, test_type)
+                    query = self._dbase.query_bandwidth(
+                        self._taskid, case, provider, test_type)
                 else:
-                    query = self._dbase.query_avglatency(self._taskid, case, provider, test_type)
+                    query = self._dbase.query_avglatency(
+                        self._taskid, case, provider, test_type)
                 query = map(lambda x: list(x), zip(*query))
                 if query:
-                    table_head = [[type_dict["FrameSize"]] + map(lambda x: "  %4d  " % (x), query[0])]
+                    table_head = [[type_dict["FrameSize"]] +
+                                  map(lambda x: "  %4d  " % (x), query[0])]
                     if item == "Avg":
-                        data = map(lambda x: item_dict[item] + "%.1f" % x + item_dict[item], query[1])
+                        data = map(
+                            lambda x: item_dict[item] + "%.1f" %
+                            x + item_dict[item], query[1])
                     else:
-                        data = map(lambda x: item_dict[item] + "%.2f" % x + item_dict[item], query[1])
+                        data = map(
+                            lambda x: item_dict[item] + "%.2f" %
+                            x + item_dict[item], query[1])
                     if item == "Mpps":
-                        line_table = map(lambda x: "%.2f" % (line_speed * 1000 / (8 * (x + 20))), query[0])
+                        line_table = map(lambda x: "%.2f" % (
+                            line_speed * 1000 / (8 * (x + 20))), query[0])
                     table.append([type_dict[provider]] + data)
         if table:
             if item == "Mpps":
@@ -260,7 +315,8 @@ class ScenarioData(DataProvider):
         result = []
         if table_data:
             ytitle = "Average Latency (uSec)"
-            category_names = map(lambda x: "FS:%4d" % int(float(x)) + "LOAD:50", table_data[0][1:])
+            category_names = map(lambda x: "FS:%4d" %
+                                 int(float(x)) + "LOAD:50", table_data[0][1:])
             bar_ = map(lambda x: x[0], table_data[1:])
             data = map(lambda x: x[1:], table_data[1:])
             result = [ytitle, category_names, bar_, data]
@@ -268,10 +324,12 @@ class ScenarioData(DataProvider):
 
     def get_bardata(self, case, provider, test_type):
         if test_type == "latency":
-            query = self._dbase.query_avglatency(self._taskid, case, provider, test_type)
+            query = self._dbase.query_avglatency(
+                self._taskid, case, provider, test_type)
             item = "Avg"
         else:
-            query = self._dbase.query_load(self._taskid, case, provider, test_type)
+            query = self._dbase.query_load(
+                self._taskid, case, provider, test_type)
             item = "Percent"
 
         title_dict = {
@@ -290,7 +348,9 @@ class ScenarioData(DataProvider):
         query = map(lambda x: list(x), zip(*query))
         result = []
         if query:
-            category_names = map(lambda x: "FS:%4d" % x + name_dict[item], query[0])
+            category_names = map(
+                lambda x: "FS:%4d" %
+                x + name_dict[item], query[0])
             data = query[1:]
             bar_ = [color_dict[item]]
             result = [ytitle, category_names, bar_, data]
@@ -298,6 +358,7 @@ class ScenarioData(DataProvider):
 
 
 class TaskData(object):
+
     def __init__(self, taskid, dbase):
         self.__common = CommonData(taskid, dbase)
         scenario_list = self.__common.get_scenariolist()
@@ -312,6 +373,7 @@ class TaskData(object):
 
 
 class HistoryData(DataProvider):
+
     def get_data(self, task_list, case, provider, ttype, item):
         """
         @provider  in ["fastlink", "rdp", "l2switch", ""]
@@ -324,17 +386,18 @@ class HistoryData(DataProvider):
         sizes = []
         for taskid in task_list:
             if item == 'ratep':
-                query = self._dbase.query_bandwidth(taskid, case, provider, ttype)
+                query = self._dbase.query_bandwidth(
+                    taskid, case, provider, ttype)
             else:
-                query = self._dbase.query_avglatency(taskid, case, provider, ttype)
+                query = self._dbase.query_avglatency(
+                    taskid, case, provider, ttype)
 
             if query:
                 data = {}
                 for size, value in query:
                     data[size] = value
                 sizes.extend(data.keys())
-                sizes = {}.fromkeys(sizes).keys()
-                sizes.sort()
+                sizes = sorted({}.fromkeys(sizes).keys())
                 datas.append({taskid: data})
 
         result = []
@@ -367,7 +430,10 @@ class HistoryData(DataProvider):
         return task_list
 
     def get_history_info(self, case):
-        provider_dict = {"fastlink": "Fast Link ", "l2switch": "L2Switch ", "rdp": "Kernel RDP "}
+        provider_dict = {
+            "fastlink": "Fast Link ",
+            "l2switch": "L2Switch ",
+            "rdp": "Kernel RDP "}
         ttype_dict = {
             "throughput": "Throughput Testing ",
             "frameloss": "Frame Loss Testing ",
@@ -390,7 +456,8 @@ class HistoryData(DataProvider):
                 item = "ratep"
 
             for provider in cst.PROVIDERS:
-                table_data = self.get_data(task_list, case, provider, ttype, item)
+                table_data = self.get_data(
+                    task_list, case, provider, ttype, item)
                 if table_data:
                     data = {
                         "title": provider_dict[provider] + items_dict[item],
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 5769da7..02606b4 100644 (file)
@@ -13,6 +13,7 @@ import vstf.common.pyhtml as pyhtm
 
 
 class HtmlBase(object):
+
     def __init__(self, provider):
         self._page = pyhtm.PyHtml('Html Text')
         self._provider = provider
index 695ea37..f866f18 100644 (file)
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
 
 
 class HtmlCreator(HtmlBase):
+
     def create_story(self):
         self.add_context()
 
@@ -70,7 +71,10 @@ class HtmlCreator(HtmlBase):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/html-creator.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/html-creator.log",
+        clevel=logging.INFO)
 
     out_file = "vstf_report.html"
 
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index ef8b54d..6622281 100644 (file)
@@ -32,8 +32,16 @@ from vstf.controller.reporters.report.pdf.styles import *
 class eImage(Image):
     """ an image(digital picture)which contains the function of auto zoom picture """
 
-    def __init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1, hAlign='CENTRE',
-                 vAlign='BOTTOM'):
+    def __init__(
+            self,
+            filename,
+            width=None,
+            height=None,
+            kind='direct',
+            mask="auto",
+            lazy=1,
+            hAlign='CENTRE',
+            vAlign='BOTTOM'):
         Image.__init__(self, filename, None, None, kind, mask, lazy)
         print height, width
         print self.drawHeight, self.drawWidth
@@ -78,6 +86,7 @@ class eTable(object):
 
 
 class eCommonTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -89,6 +98,7 @@ class eCommonTable(eTable):
 
 
 class eConfigTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -113,6 +123,7 @@ class eConfigTable(eTable):
 
 
 class eSummaryTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -127,6 +138,7 @@ class eSummaryTable(eTable):
 
 
 class eGitInfoTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -141,6 +153,7 @@ class eGitInfoTable(eTable):
 
 
 class eScenarioTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -157,6 +170,7 @@ class eScenarioTable(eTable):
 
 
 class eOptionsTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -181,6 +195,7 @@ class eOptionsTable(eTable):
 
 
 class eProfileTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -194,6 +209,7 @@ class eProfileTable(eTable):
 
 
 class eDataTable(eTable):
+
     def analysisData(self, data):
         result = data
         self._style = [
@@ -229,6 +245,7 @@ class eDataTable(eTable):
 
 
 class eGraphicsTable(eTable):
+
     def analysisData(self, data):
         self._style = [
             ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
@@ -238,12 +255,14 @@ class eGraphicsTable(eTable):
 
 
 class noScaleXValueAxis(XValueAxis):
+
     def __init__(self):
         XValueAxis.__init__(self)
 
     def makeTickLabels(self):
         g = Group()
-        if not self.visibleLabels: return g
+        if not self.visibleLabels:
+            return g
 
         f = self._labelTextFormat  # perhaps someone already set it
         if f is None:
@@ -307,14 +326,17 @@ class noScaleXValueAxis(XValueAxis):
                             txt = f(t)
                     else:
                         raise ValueError('Invalid labelTextFormat %s' % f)
-                    if post: txt = post % txt
+                    if post:
+                        txt = post % txt
                     pos[d] = v
                     label.setOrigin(*pos)
                     label.setText(txt)
 
-                    # special property to ensure a label doesn't project beyond the bounds of an x-axis
+                    # special property to ensure a label doesn't project beyond
+                    # the bounds of an x-axis
                     if self.keepTickLabelsInside:
-                        if isinstance(self, XValueAxis):  # not done yet for y axes
+                        if isinstance(
+                                self, XValueAxis):  # not done yet for y axes
                             a_x = self._x
                             if not i:  # first one
                                 x0, y0, x1, y1 = label.getBounds()
@@ -324,7 +346,8 @@ class noScaleXValueAxis(XValueAxis):
                                 a_x1 = a_x + self._length
                                 x0, y0, x1, y1 = label.getBounds()
                                 if x1 > a_x1:
-                                    label = label.clone(dx=label.dx - x1 + a_x1)
+                                    label = label.clone(
+                                        dx=label.dx - x1 + a_x1)
                     g.add(label)
 
         return g
@@ -342,8 +365,10 @@ class noScaleXValueAxis(XValueAxis):
         The chart first configures the axis, then asks it to
         """
         assert self._configured, "Axis cannot scale numbers before it is configured"
-        if value is None: value = 0
-        # this could be made more efficient by moving the definition of org and sf into the configuration
+        if value is None:
+            value = 0
+        # this could be made more efficient by moving the definition of org and
+        # sf into the configuration
         org = (self._x, self._y)[self._dataIndex]
         sf = self._length / (len(self._tickValues) + 1)
         if self.reverseDirection:
@@ -353,6 +378,7 @@ class noScaleXValueAxis(XValueAxis):
 
 
 class noScaleLinePlot(LinePlot):
+
     def __init__(self):
         LinePlot.__init__(self)
         self.xValueAxis = noScaleXValueAxis()
@@ -373,7 +399,8 @@ class noScaleLinePlot(LinePlot):
             for colNo in range(len_row):
                 datum = self.data[rowNo][colNo]  # x, y value
                 x = self.x + self.width / (len_row + 1) * (colNo + 1)
-                self.xValueAxis.labels[colNo].x = self.x + self.width / (len_row + 1) * (colNo + 1)
+                self.xValueAxis.labels[colNo].x = self.x + \
+                    self.width / (len_row + 1) * (colNo + 1)
                 y = self.yValueAxis.scale(datum[1])
                 #               print self.width, " ", x
                 line.append((x, y))
@@ -383,6 +410,7 @@ class noScaleLinePlot(LinePlot):
 # def _innerDrawLabel(self, rowNo, colNo, x, y):
 #        return None
 class eLinePlot(object):
+
     def __init__(self, data, style):
         self._lpstyle = style
         self._linename = data[0]
@@ -485,9 +513,11 @@ class eLinePlot(object):
         for i in range(line_cnts):
             styleIndex = i % sytle_cnts
             lp.lines[i].strokeColor = self._lpstyle.linestyle[styleIndex][0]
-            lp.lines[i].symbol = makeMarker(self._lpstyle.linestyle[styleIndex][1])
+            lp.lines[i].symbol = makeMarker(
+                self._lpstyle.linestyle[styleIndex][1])
             lp.lines[i].strokeWidth = self._lpstyle.linestyle[styleIndex][2]
-            color_paris.append((self._lpstyle.linestyle[styleIndex][0], self._linename[i]))
+            color_paris.append(
+                (self._lpstyle.linestyle[styleIndex][0], self._linename[i]))
         #            lp.lineLabels[i].strokeColor = self._lpstyle.linestyle[styleIndex][0]
 
         lp.lineLabelFormat = self._lpstyle.format[0]
@@ -501,8 +531,6 @@ class eLinePlot(object):
 
         lp.yValueAxis.valueMin, lp.yValueAxis.valueMax, lp.yValueAxis.valueSteps = self._yvalue
 
-
-
         #       lp.xValueAxis.forceZero = 0
         #       lp.xValueAxis.avoidBoundFrac = 1
         #       lp.xValueAxis.tickDown = 3
@@ -540,6 +568,7 @@ class eLinePlot(object):
 
 
 class eHorizontalLineChart(object):
+
     def __init__(self, data, style):
         self._lcstyle = style
         if len(data) < 1:
@@ -630,9 +659,11 @@ class eHorizontalLineChart(object):
         for i in range(line_cnts):
             styleIndex = i % sytle_cnts
             lc.lines[i].strokeColor = self._lcstyle.linestyle[styleIndex][0]
-            lc.lines[i].symbol = makeMarker(self._lcstyle.linestyle[styleIndex][1])
+            lc.lines[i].symbol = makeMarker(
+                self._lcstyle.linestyle[styleIndex][1])
             lc.lines[i].strokeWidth = self._lcstyle.linestyle[styleIndex][2]
-            color_paris.append((self._lcstyle.linestyle[styleIndex][0], self._linename[i]))
+            color_paris.append(
+                (self._lcstyle.linestyle[styleIndex][0], self._linename[i]))
 
         lc.lineLabels.fontSize = self._lcstyle.labelsfont - 2
 
@@ -660,6 +691,7 @@ class eHorizontalLineChart(object):
 
 
 class eBarChartColumn(object):
+
     def __init__(self, data, style):
         self._bcstyle = style
         if len(data) < 4:
@@ -702,7 +734,10 @@ class eBarChartColumn(object):
         color_paris = []
         for i in range(bar_cnt):
             bc.bars[i].fillColor = self._bcstyle.pillarstyle[self._bar[i]][0]
-            color_paris.append((self._bcstyle.pillarstyle[self._bar[i]][0], self._bar[i]))
+            color_paris.append(
+                (self._bcstyle.pillarstyle[
+                    self._bar[i]][0],
+                    self._bar[i]))
 
         bc.fillColor = self._bcstyle.background
         bc.barLabels.fontName = 'Helvetica'
@@ -761,6 +796,7 @@ class eBarChartColumn(object):
 
 
 class eParagraph(object):
+
     def __init__(self, data, style):
         self._pstyle = style
         self._data = self.analysisData(data)
index c33974e..67f988c 100644 (file)
@@ -22,6 +22,7 @@ LOG = logging.getLogger(__name__)
 
 
 class PdfCreator(object):
+
     def __init__(self, provider):
         self._provider = provider
         self._story = []
@@ -114,7 +115,10 @@ class PdfCreator(object):
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/pdf-creator.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/pdf-creator.log",
+        clevel=logging.INFO)
 
     out_file = "vstf_report.pdf"
 
index 69c6540..7e28781 100644 (file)
@@ -13,9 +13,11 @@ from reportlab.platypus.doctemplate import SimpleDocTemplate
 from reportlab.platypus import PageBreak
 from vstf.controller.reporters.report.pdf.styles import TemplateStyle, ps_head_lv1, ps_head_lv2, ps_head_lv3
 import vstf.common.constants as cst
+from functools import reduce
 
 
 class BaseDocTemplate(SimpleDocTemplate):
+
     def __init__(self, filename, **kw):
         self.allowSplitting = 0
         SimpleDocTemplate.__init__(self, filename, **kw)
@@ -34,6 +36,7 @@ class BaseDocTemplate(SimpleDocTemplate):
 
 
 class PdfTemplate(object):
+
     def __init__(self, title, logo, header, footer, note=[], style="default"):
         self._style = TemplateStyle(name=style)
         self._title = title
@@ -41,7 +44,8 @@ class PdfTemplate(object):
         #self._header = header[0]
         self._footer = footer
         self._note = note
-        info = " Generated on %s " % time.strftime(cst.TIME_FORMAT2, time.localtime())
+        info = " Generated on %s " % time.strftime(
+            cst.TIME_FORMAT2, time.localtime())
         self._note += [info]
 
     def myFirstPage(self, canvas, doc):
@@ -54,46 +58,78 @@ class PdfTemplate(object):
         sizes = (self._style.page_wight, self._style.page_height)
         doc = BaseDocTemplate(output, pagesize=sizes)
         #    doc.build(story, onFirstPage=self.myFirstPage, onLaterPages=self.myLaterPages)
-        doc.multiBuild(story, onFirstPage=self.myFirstPage, onLaterPages=self.myLaterPages)
+        doc.multiBuild(
+            story,
+            onFirstPage=self.myFirstPage,
+            onLaterPages=self.myLaterPages)
 
 
 class PdfVswitch(PdfTemplate):
 
 
 class PdfVswitch(PdfTemplate):
+
     def myFirstPage(self, canvas, doc):
         canvas.saveState()
         title_lines = len(self._title)
         line_size = [self._style.title_size] * title_lines
         line_size.append(0)
 
     def myFirstPage(self, canvas, doc):
         canvas.saveState()
         title_lines = len(self._title)
         line_size = [self._style.title_size] * title_lines
         line_size.append(0)
 
-        canvas.drawImage(self._logo,
-                         (self._style.page_wight - self._style.logo_width) / 2.0,
-                         self._style.page_height / 2.0 + (1 + self._style.title_leading) * reduce(lambda x, y: x + y,
-                                                                                                  line_size),
-                         self._style.logo_width,
-                         self._style.logo_height
-                         )
+        canvas.drawImage(
+            self._logo,
+            (self._style.page_wight -
+             self._style.logo_width) /
+            2.0,
+            self._style.page_height /
+            2.0 +
+            (
+                1 +
+                self._style.title_leading) *
+            reduce(
+                lambda x,
+                y: x +
+                y,
+                line_size),
+            self._style.logo_width,
+            self._style.logo_height)
         for i in range(title_lines):
             canvas.setFont(self._style.title_font, line_size[i])
-            canvas.drawCentredString(self._style.page_wight / 2.0,
-                                     self._style.page_height / 2.0 + (1 + self._style.title_leading) * reduce(
-                                         lambda x, y: x + y, line_size[i + 1:]),
-                                     self._title[i]
-                                     )
+            canvas.drawCentredString(
+                self._style.page_wight /
+                2.0,
+                self._style.page_height /
+                2.0 +
+                (
+                    1 +
+                    self._style.title_leading) *
+                reduce(
+                    lambda x,
+                    y: x +
+                    y,
+                    line_size[
+                        i +
+                        1:]),
+                self._title[i])
         size = self._style.body_size
         canvas.setFont(self._style.body_font, size)
         note_line = len(self._note)
 
         for i in range(note_line):
             print self._note[i]
-            canvas.drawCentredString(self._style.page_wight / 2.0,
-                                     self._style.page_height / 5.0 + (1 + self._style.body_leading) * size * (
-                                     note_line - i - 1),
-                                     self._note[i]
-                                     )
+            canvas.drawCentredString(self._style.page_wight /
+                                     2.0, self._style.page_height /
+                                     5.0 +
+                                     (1 +
+                                      self._style.body_leading) *
+                                     size *
+                                     (note_line -
+                                         i -
+                                         1), self._note[i])
         size = self._style.body_size - 2
         canvas.setFont(self._style.body_font, size)
-        canvas.drawCentredString(self._style.page_wight / 2.0,
-                                 self._style.page_bottom / 2.0 + (1 + self._style.body_leading) * size,
-                                 self._footer[0])
+        canvas.drawCentredString(self._style.page_wight /
+                                 2.0, self._style.page_bottom /
+                                 2.0 +
+                                 (1 +
+                                  self._style.body_leading) *
+                                 size, self._footer[0])
         canvas.restoreState()
 
     def myLaterPages(self, canvas, doc):
@@ -106,9 +142,7 @@ class PdfVswitch(PdfTemplate):
                     )
         size = self._style.body_size - 2
         canvas.setFont(self._style.body_font, size)
-        canvas.drawCentredString(self._style.page_wight / 2.0,
-                                 self._style.page_bottom - 24,
-                                 "%s%s Page %2d " % (self._footer[0], " " * 8, doc.page - 1)
-                                 )
+        canvas.drawCentredString(
+            self._style.page_wight / 2.0, self._style.page_bottom - 24, "%s%s Page %2d " %
+            (self._footer[0], " " * 8, doc.page - 1))
         canvas.restoreState()
-
index 940c20f..f1442fe 100644 (file)
@@ -21,6 +21,7 @@ from element import *
 
 
 class Story(object):
+
     def __init__(self):
         self._storylist = []
 
@@ -30,6 +31,7 @@ class Story(object):
 
 
 class StoryDecorator(Story):
+
     def __init__(self, story, data=None, style=None):
         self._story = story
         self._data = data
@@ -47,6 +49,7 @@ class StoryDecorator(Story):
 
 
 class ImageStory(StoryDecorator):
+
     def new_story(self):
         print "Image Story"
         for filename in self._data:
@@ -60,7 +63,12 @@ class ImageStory(StoryDecorator):
                 image_hAlign = style.image_hAlign
                 image_vAlign = style.image_vAlign
                 self._story.storylist.append(
-                    eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign))
+                    eImage(
+                        filename,
+                        image_width,
+                        image_height,
+                        hAlign=image_hAlign,
+                        vAlign=image_vAlign))
             else:
                 style = is_default
                 image_height = style.image_height
@@ -69,22 +77,30 @@ class ImageStory(StoryDecorator):
                 image_vAlign = style.image_vAlign
                 #    self._story.storylist.append(eGraphicsTable([[' ' * 5, eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign)]], ts_left).table)
                 self._story.storylist.append(
-                    eImage(filename, image_width, image_height, hAlign=image_hAlign, vAlign=image_vAlign))
+                    eImage(
+                        filename,
+                        image_width,
+                        image_height,
+                        hAlign=image_hAlign,
+                        vAlign=image_vAlign))
 
 
 class HeaderStory(StoryDecorator):
+
     def new_story(self):
         print "header story"
         self._story.storylist.append(PageBreak())
 
 
 class PageBreakStory(StoryDecorator):
+
     def new_story(self):
         print "PageBreak story"
         self._story.storylist.append(PageBreak())
 
 
 class TableOfContentsStory(StoryDecorator):
+
     def new_story(self):
         print "TableOfContents story"
         self._data = [" ", " ", "Table Of Contents", ""]
@@ -96,35 +112,43 @@ class TableOfContentsStory(StoryDecorator):
 
 
 class SpaceStory(StoryDecorator):
+
     def new_story(self):
         style = ps_space
         self._story.storylist.append(eParagraph([" ", " "], style).para)
 
 
 class TableStory(StoryDecorator):
+
     def new_story(self):
         print "table story"
         style = ts_default
         if self._style == 1:
             self._story.storylist.append(eDataTable(self._data, style).table)
-        elif self._style ==2:
+        elif self._style == 2:
             style = ts_left
             self._story.storylist.append(eCommonTable(self._data, style).table)
         elif self._style == 3:
             self._story.storylist.append(eConfigTable(self._data, style).table)
         elif self._style == 4:
-            self._story.storylist.append(eOptionsTable(self._data, style).table)
+            self._story.storylist.append(
+                eOptionsTable(self._data, style).table)
         elif self._style == 5:
-            self._story.storylist.append(eProfileTable(self._data, style).table)
+            self._story.storylist.append(
+                eProfileTable(self._data, style).table)
         elif self._style == 6:
-            self._story.storylist.append(eSummaryTable(self._data, style).table)
+            self._story.storylist.append(
+                eSummaryTable(self._data, style).table)
         elif self._style == 7:
-            self._story.storylist.append(eScenarioTable(self._data, style).table)
+            self._story.storylist.append(
+                eScenarioTable(self._data, style).table)
         elif self._style == 8:
-            self._story.storylist.append(eGitInfoTable(self._data, style).table)
+            self._story.storylist.append(
+                eGitInfoTable(self._data, style).table)
 
 
 class LinePlotStory(StoryDecorator):
+
     def new_story(self):
         print "LinePlot"
         style = lps_default
@@ -137,18 +161,21 @@ class LinePlotStory(StoryDecorator):
 
 
 class LineChartStory(StoryDecorator):
+
     def new_story(self):
         print "LineChartStory: "
         style = lcs_default
         if not self._data:
             print "data error "
             return
-        data = eGraphicsTable([[eHorizontalLineChart(self._data, style).draw]]).table
+        data = eGraphicsTable(
+            [[eHorizontalLineChart(self._data, style).draw]]).table
         if data:
             self._story.storylist.append(data)
 
 
 class BarChartStory(StoryDecorator):
+
     def new_story(self):
         print "BarChartStory: "
         style = bcs_default
@@ -156,12 +183,14 @@ class BarChartStory(StoryDecorator):
             print "data error "
             return
 
-        data = eGraphicsTable([[eBarChartColumn(self._data, style).draw]]).table
+        data = eGraphicsTable(
+            [[eBarChartColumn(self._data, style).draw]]).table
         if data:
             self._story.storylist.append(data)
 
 
 class ParagraphStory(StoryDecorator):
+
     def new_story(self):
         print "Paragraph Story"
         style = ps_body
@@ -174,6 +203,7 @@ class ParagraphStory(StoryDecorator):
 
 
 class TitleStory(StoryDecorator):
+
     def new_story(self):
         print "Paragraph Story"
         if self._style - 1 in range(9):
index 74c4c59..2e0863e 100644 (file)
@@ -15,6 +15,7 @@ from vstf.controller.settings.template_settings import TemplateSettings
 
 
 class HtmlProvider(object):
+
     def __init__(self, info, style):
         self._info = info
         self._style = style
@@ -32,7 +33,10 @@ class HtmlProvider(object):
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/html-provder.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/html-provder.log",
+        clevel=logging.INFO)
 
     html_settings = HtmlSettings()
     LOG.info(html_settings.settings)
@@ -42,4 +46,4 @@ def main():
     LOG.info(provider.get_context)
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
index e1cb09e..f775201 100644 (file)
@@ -15,6 +15,7 @@ from vstf.controller.settings.template_settings import TemplateSettings
 
 
 class PdfProvider(object):
+
     def __init__(self, info):
         self._info = info
 
@@ -37,7 +38,10 @@ class PdfProvider(object):
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/pdf-provider.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/pdf-provider.log",
+        clevel=logging.INFO)
 
     info = TemplateSettings()
     provider = PdfProvider(info.settings)
@@ -46,4 +50,4 @@ def main():
     LOG.info(provider.get_context)
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
index 654c9b8..ea0a1ad 100644 (file)
@@ -30,6 +30,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Report(object):
+
     def __init__(self, dbase, rpath):
         """
 
@@ -47,7 +48,10 @@ class Report(object):
         creator = CandyGenerator(task)
         attach_list = []
         for scenario in scenario_list:
-            out_file = os.path.join(self._rpath, "vstf_report_%s_%s.pdf" % (scenario, time.strftime(cst.TIME_FORMAT3)))
+            out_file = os.path.join(
+                self._rpath, "vstf_report_%s_%s.pdf" %
+                (scenario, time.strftime(
+                    cst.TIME_FORMAT3)))
             LOG.info(out_file)
             creator.create(scenario)
             info = TemplateSettings()
@@ -90,7 +94,10 @@ class Report(object):
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-reporter.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-reporter.log",
+        clevel=logging.INFO)
 
     parser = argparse.ArgumentParser(add_help=True)
     parser.add_argument('-rpath',
index a25af4c..9589e11 100644 (file)
@@ -18,6 +18,7 @@ LOG = logging.getLogger(__name__)
 
 
 class CpuSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/perf/",
                  filename="sw_perf.cpu-settings",
                  mode=sets.SETS_SINGLE):
@@ -32,9 +33,22 @@ class CpuSettings(sets.Settings):
         for item in body:
             item = item.encode()
             func_name = "set_%s" % item
-            setattr(self, func_name, self._setting_file(func_name, self._mset['affctl'], self._fset['affctl'], item))
+            setattr(
+                self,
+                func_name,
+                self._setting_file(
+                    func_name,
+                    self._mset['affctl'],
+                    self._fset['affctl'],
+                    item))
             func_name = "mset_%s" % item
-            setattr(self, func_name, self._setting_memory(func_name, self._mset['affctl'], item))
+            setattr(
+                self,
+                func_name,
+                self._setting_memory(
+                    func_name,
+                    self._mset['affctl'],
+                    item))
 
         LOG.debug(self.__dict__)
 
@@ -59,8 +73,10 @@ class CpuSettings(sets.Settings):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-cpu-settings.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-cpu-settings.log",
+        clevel=logging.INFO)
 
 if __name__ == '__main__':
     unit_test()
-
index 25f2c5b..5fe3976 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class DeviceSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/perf/",
                  filename="sw_perf.device-settings",
                  mode=sets.SETS_SINGLE):
index 9cd1a1b..f28d5b5 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class FlowsSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/perf/",
                  filename="sw_perf.flownodes-settings",
                  mode=sets.SETS_SINGLE):
@@ -27,16 +28,44 @@ class FlowsSettings(sets.Settings):
         for actor in self._check_actors:
             actor = actor.encode()
             func_name = "add_%s" % actor
-            setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, actor, self._check_add))
+            setattr(
+                self,
+                func_name,
+                self._adding_file(
+                    func_name,
+                    self._mset,
+                    self._fset,
+                    actor,
+                    self._check_add))
             func_name = "madd_%s" % actor
-            setattr(self, func_name, self._adding_memory(func_name, self._mset, actor, self._check_add))
+            setattr(
+                self,
+                func_name,
+                self._adding_memory(
+                    func_name,
+                    self._mset,
+                    actor,
+                    self._check_add))
 
         for actor in self._nocheck_actors:
             actor = actor.encode()
             func_name = "add_%s" % actor
-            setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, actor))
+            setattr(
+                self,
+                func_name,
+                self._adding_file(
+                    func_name,
+                    self._mset,
+                    self._fset,
+                    actor))
             func_name = "madd_%s" % actor
-            setattr(self, func_name, self._adding_memory(func_name, self._mset, actor))
+            setattr(
+                self,
+                func_name,
+                self._adding_memory(
+                    func_name,
+                    self._mset,
+                    actor))
 
         LOG.debug(self.__dict__.keys())
 
@@ -70,7 +99,10 @@ class FlowsSettings(sets.Settings):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-flows-settings.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-flows-settings.log",
+        clevel=logging.INFO)
 
     flows_settings = FlowsSettings()
     LOG.info(flows_settings.settings)
@@ -113,7 +145,7 @@ def unit_test():
 
     cpu = {
         "agent": "192.168.188.16",
-        "affctl":{
+        "affctl": {
             "policy": 2
         }
     }
index 636ddfd..138337c 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class ForwardingSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/perf/",
                  filename="sw_perf.forwarding-settings",
                  mode=sets.SETS_SINGLE):
index ce87733..89af7a5 100644 (file)
@@ -15,13 +15,21 @@ LOG = logging.getLogger(__name__)
 
 
 class HtmlSettings(sets.Settings):
-    def __init__(self, path="/etc/vstf/", filename="reporters.html-settings", mode=sets.SETS_DEFAULT):
+
+    def __init__(
+            self,
+            path="/etc/vstf/",
+            filename="reporters.html-settings",
+            mode=sets.SETS_DEFAULT):
         super(HtmlSettings, self).__init__(path, filename, mode)
 
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/html-settings.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/html-settings.log",
+        clevel=logging.DEBUG)
     html_settings = HtmlSettings()
     style = {
         'table': {
@@ -36,13 +44,13 @@ def unit_test():
                 'border': '1px solid green',
                 'padding': '8px',
                 'word-wrap': 'break-all'
-            },
+        },
         'th':
             {
                 'background-color': '#EAF2D3',
                 'border': '1px solid green',
                 'padding': '8px'
-            }
+        }
     }
 
     html_settings.set_style(style)
index db01097..967aa60 100644 (file)
@@ -18,7 +18,12 @@ LOG = logging.getLogger(__name__)
 
 
 class MailSettings(sets.Settings):
-    def __init__(self, path="/etc/vstf", filename="reporters.mail.mail-settings", mode=sets.SETS_DEFAULT):
+
+    def __init__(
+            self,
+            path="/etc/vstf",
+            filename="reporters.mail.mail-settings",
+            mode=sets.SETS_DEFAULT):
         super(MailSettings, self).__init__(path, filename, mode)
 
     def _register_func(self):
@@ -30,11 +35,24 @@ class MailSettings(sets.Settings):
         for item in body:
             item = item.encode()
             func_name = "set_%s" % item
-            setattr(self, func_name, self._setting_file(func_name, self._mset['body'], self._fset['body'], item))
+            setattr(
+                self,
+                func_name,
+                self._setting_file(
+                    func_name,
+                    self._mset['body'],
+                    self._fset['body'],
+                    item))
         other = {"attach", "content", "subtype"}
         for item in other:
             func_name = "mset_%s" % item
-            setattr(self, func_name, self._setting_memory(func_name, self._mset['body'], item))
+            setattr(
+                self,
+                func_name,
+                self._setting_memory(
+                    func_name,
+                    self._mset['body'],
+                    item))
 
         LOG.debug(self.__dict__)
 
@@ -80,7 +98,10 @@ class MailSettings(sets.Settings):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-mail-settings.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-mail-settings.log",
+        clevel=logging.INFO)
 
     mail_settings = MailSettings()
     mail_settings.sinput()
index 610cb4a..adc8dee 100644 (file)
@@ -20,6 +20,7 @@ LOG = logging.getLogger(__name__)
 
 
 class PerfSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/perf/",
                  filename="sw_perf.batch-settings",
                  mode=sets.SETS_SINGLE):
@@ -42,14 +43,23 @@ class PerfSettings(sets.Settings):
         if not scenario:
             LOG.warn("not support the case:%s", value["case"])
             return
-        self._adding_file("add", self._mset, self._fset, scenario, check=self._check_add)(value)
+        self._adding_file(
+            "add",
+            self._mset,
+            self._fset,
+            scenario,
+            check=self._check_add)(value)
 
     def madd_case(self, case):
         scenario = self.dbconn.query_scenario(case)
         if not scenario:
             LOG.warn("not support the case:%s", case)
             return
-        self._adding_memory("madd", self._mset, scenario, check=self._check_add)(case)
+        self._adding_memory(
+            "madd",
+            self._mset,
+            scenario,
+            check=self._check_add)(case)
 
     @deco.dcheck('sizes')
     @deco.dcheck("type", choices=cst.TTYPES)
@@ -74,7 +84,7 @@ class PerfSettings(sets.Settings):
         pprint.pprint(self.settings)
         print "+++++++++++++++++++++++++++++++++++"
         return True
-    
+
     @deco.vstf_input('sizes', types=list)
     @deco.vstf_input("type", types=str, choices=cst.TTYPES)
     @deco.vstf_input("profile", types=str, choices=cst.PROVIDERS)
@@ -98,7 +108,10 @@ def unit_test():
     perf_settings.sinput()
 
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-perf-settings.log", clevel=logging.DEBUG)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-perf-settings.log",
+        clevel=logging.DEBUG)
 
 
 if __name__ == '__main__':
index 2c712bb..a01689d 100644 (file)
@@ -31,7 +31,8 @@ def dict2object(dic):
         module_name = dic.pop('__module__')
         module = __import__(module_name)
         class_ = getattr(module, class_name)
-        args = dict((key.encode('ascii'), value) for key, value in dic.items())  # get args
+        args = dict((key.encode('ascii'), value)
+                    for key, value in dic.items())  # get args
         inst = class_(**args)  # create new instance
     else:
         inst = dic
@@ -52,6 +53,7 @@ def filter_comments(filename, flags="//"):
 
 
 class BaseSettings(object):
+
     def _load(self, fullname):
         data = filter_comments(fullname)
         LOG.debug(fullname)
@@ -68,7 +70,11 @@ class BaseSettings(object):
                 for litem in ldata:
                     if rdata:
                         for ritem in rdata:
-                            if isinstance(litem, dict) or isinstance(litem, list):
+                            if isinstance(
+                                    litem,
+                                    dict) or isinstance(
+                                    litem,
+                                    list):
                                 tmp = self._sub(litem, ritem)
                             else:
                                 tmp = ritem
@@ -104,15 +110,22 @@ class BaseSettings(object):
         if os.path.exists(filename):
             os.remove(filename)
         with open(filename, 'w') as ofile:
-            content = json.dumps(data, sort_keys=True, indent=4, separators=(',', ':'))
+            content = json.dumps(
+                data,
+                sort_keys=True,
+                indent=4,
+                separators=(
+                    ',',
+                    ':'))
             ofile.write(content)
 
 
 class DefaultSettings(BaseSettings):
+
     def __init__(self, path):
         self._default = os.path.join(path, 'default')
         self._user = os.path.join(path, 'user')
-    
+
     def load(self, filename):
         dfile = os.path.join(self._default, filename)
         if os.path.exists(dfile):
@@ -137,6 +150,7 @@ class DefaultSettings(BaseSettings):
 
 
 class SingleSettings(BaseSettings):
+
     def __init__(self, path):
         self._path = path
 
@@ -161,6 +175,7 @@ SETTINGS = [SETS_SINGLE, SETS_DEFAULT]
 
 
 class Settings(object):
+
     def __init__(self, path, filename, mode=SETS_SINGLE):
         if mode not in SETTINGS:
             raise Exception("error Settings mode : %s" % (mode))
@@ -257,23 +272,65 @@ class Settings(object):
             for item in items:
                 item = item.encode()
                 func_name = "set_%s" % item
-                setattr(self, func_name, self._setting_file(func_name, self._mset, self._fset, item))
+                setattr(
+                    self,
+                    func_name,
+                    self._setting_file(
+                        func_name,
+                        self._mset,
+                        self._fset,
+                        item))
                 func_name = "mset_%s" % item
-                setattr(self, func_name, self._setting_memory(func_name, self._mset, item))
+                setattr(
+                    self,
+                    func_name,
+                    self._setting_memory(
+                        func_name,
+                        self._mset,
+                        item))
         elif isinstance(self._fset, list):
             func_name = "set"
-            setattr(self, func_name, self._setting_file(func_name, self._mset, self._fset, None))
+            setattr(
+                self,
+                func_name,
+                self._setting_file(
+                    func_name,
+                    self._mset,
+                    self._fset,
+                    None))
             func_name = "mset"
-            setattr(self, func_name, self._setting_memory(func_name, self._mset, None))
+            setattr(
+                self,
+                func_name,
+                self._setting_memory(
+                    func_name,
+                    self._mset,
+                    None))
             func_name = "add"
-            setattr(self, func_name, self._adding_file(func_name, self._mset, self._fset, None))
+            setattr(
+                self,
+                func_name,
+                self._adding_file(
+                    func_name,
+                    self._mset,
+                    self._fset,
+                    None))
             func_name = "madd"
-            setattr(self, func_name, self._adding_memory(func_name, self._mset, None))
+            setattr(
+                self,
+                func_name,
+                self._adding_memory(
+                    func_name,
+                    self._mset,
+                    None))
 
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf-settings.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf-settings.log",
+        clevel=logging.INFO)
 
     path = '/etc/vstf'
     setting = DefaultSettings(path)
index b677c53..2e449fe 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class TemplateSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/reporter/",
                  filename="reporters.template-settings",
                  mode=sets.SETS_SINGLE):
index 554c804..5d64d29 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class TesterSettings(sets.Settings):
+
     def __init__(self, path="/etc/vstf/env/",
                  filename="tester.json",
                  mode=sets.SETS_SINGLE):
index a84bc59..aed3306 100644 (file)
@@ -18,7 +18,12 @@ LOG = logging.getLogger(__name__)
 
 
 class ToolSettings(sets.Settings):
-    def __init__(self, path="/etc/vstf", filename="sw_perf.tool-settings", mode=sets.SETS_DEFAULT):
+
+    def __init__(
+            self,
+            path="/etc/vstf",
+            filename="sw_perf.tool-settings",
+            mode=sets.SETS_DEFAULT):
         super(ToolSettings, self).__init__(path, filename, mode)
 
     def _register_func(self):
@@ -29,8 +34,15 @@ class ToolSettings(sets.Settings):
         for item in body:
             item = item.encode()
             func_name = "set_%s" % (item)
-            setattr(self, func_name,
-                    self._setting_file(func_name, self._mset, self._fset, item, check=self._check_keys))
+            setattr(
+                self,
+                func_name,
+                self._setting_file(
+                    func_name,
+                    self._mset,
+                    self._fset,
+                    item,
+                    check=self._check_keys))
 
     def _check_keys(self, value):
         keys = ['threads', 'wait', 'time']
@@ -70,7 +82,10 @@ class ToolSettings(sets.Settings):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/tool-settings.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/tool-settings.log",
+        clevel=logging.INFO)
     tool_settings = ToolSettings()
     value = {
         "time": 10,
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index d4c5be6..610b27d 100644 (file)
@@ -16,6 +16,7 @@ LOG = logging.getLogger(__name__)
 
 
 class spirentSTC(object):
+
     def __init__(self):
         super(spirentSTC, self).__init__()
         self.runmodel = None
@@ -25,7 +26,7 @@ class spirentSTC(object):
         :param str    conner: the spirent tester, the agent id of spirent vm
         :param list   measurand: the tested host's agent id
         :param str    model: the model used of the tested host
-        
+
         """
         mgr = stevedore.driver.DriverManager(namespace="spirent.model.plugins",
                                              name=model,
@@ -41,7 +42,7 @@ class spirentSTC(object):
 
 
 def run(config):
-    # test option parser 
+    # test option parser
     if not os.path.exists(config['configfile']):
         LOG.error('The config file %s does exist.', config.get("configfile"))
         return False
@@ -54,7 +55,7 @@ def run(config):
         LOG.error("[ERROR]Check parameter invalid.")
         return False
 
-    # check logical parameter in the 
+    # check logical parameter in the
     flag = runmodel.check_logic_invalid
     if not flag:
         LOG.error("[ERROR]Check logic parameter with host invalid.")
@@ -86,7 +87,8 @@ def run(config):
             LOG.error("[ERROR]Restructure the test data failed.")
         perfdata = getResult(result_dict)
         columndata = getResultColumn(result_dict)
-        column_array, data_array = analysis_instance.analyseResult(suite, columndata, perfdata)
+        column_array, data_array = analysis_instance.analyseResult(
+            suite, columndata, perfdata)
         temp = {'columns': column_array, 'data': data_array}
         result[suite] = temp
     return result
index a29794f..38bfa70 100644 (file)
@@ -24,6 +24,7 @@ reverse_dict = {
 
 
 class BaseModel(object):
+
     def __init__(self, config):
         self.config = config
 
@@ -117,6 +118,7 @@ def _tranfer_array_to_range(array):
 
 
 class TnV(BaseModel):
+
     def __init__(self, config):
         super(TnV, self).__init__(config)
         self.config = config
@@ -178,7 +180,8 @@ class TnV(BaseModel):
                 info = self.handle.get(option, 'macs')
                 macs = info.split()
                 if len(macs) != int(self.config['virtenv']) or macs == []:
-                    print("[ERROR]The macs number is not equal to vms or containers.")
+                    print(
+                        "[ERROR]The macs number is not equal to vms or containers.")
                     return False
                 for mac in macs:
                     # check mac valid
@@ -211,12 +214,12 @@ class TnV(BaseModel):
     @property
     def check_logic_invalid(self):
         return self.flow_match() and self.match_virt_env() and \
-               self.match_flows_and_nic and self.check_mac_valid() and \
-               self.check_vlan_valid()
+            self.match_flows_and_nic and self.check_mac_valid() and \
+            self.check_vlan_valid()
 
     @property
     def read_flow_init(self):
-        # The 
+        # The
         temp_flow = {}
         src_macs = self._get_range('send', 'macs')
         dst_macs = self._get_range('recv', 'macs')
@@ -233,7 +236,7 @@ class TnV(BaseModel):
         temp_flow['tester_ip'] = self._get_nic_from_file('common', 'tester_ip')
         vlan = src_vlan
         avg_flow = int(self.config['flows']) / int(self.config['virtenv'])
-        # build the main dictionary 
+        # build the main dictionary
         for _direct in sorted(fwd[self.config['direct']]):
             i = 0
             j = 0
@@ -267,9 +270,11 @@ class TnV(BaseModel):
 
                     temp_flow['qemu_thread_list'] = _vm_info['qemu_thread']
                     forward_core = {
-                        "forward": _vm_info['qemu_thread'][_queue + avg_flow * vm_index],
-                        "reverse": _vm_info['qemu_thread'][_queue + avg_flow * vm_index + int(self.config['flows'])]
-                    }
+                        "forward": _vm_info['qemu_thread'][
+                            _queue + avg_flow * vm_index],
+                        "reverse": _vm_info['qemu_thread'][
+                            _queue + avg_flow * vm_index + int(
+                                self.config['flows'])]}
                     temp_flow['fwd_thread'] = forward_core[_direct]
 
                     temp_flow['fwd_vhost'] = None
@@ -280,7 +285,8 @@ class TnV(BaseModel):
                     temp_flow['dst_nic'] = dst_nic
                     # above all
                     j += 1
-                    self.init_flows[_direct + '_' + _vm + '_' + str(_queue)] = copy.deepcopy(temp_flow)
+                    self.init_flows[_direct + '_' + _vm + '_' +
+                                    str(_queue)] = copy.deepcopy(temp_flow)
                 i += 1
             src_nic_irq, dst_nic_irq = dst_nic_irq, src_nic_irq
             vlan = dst_vlan
@@ -323,30 +329,67 @@ class TnV(BaseModel):
                         try:
                             i += 1
                             thread_info = None
-                            self.mac_learning(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)],
-                                              self.init_flows[reverse_dict[_direct] + '_' + _vm + '_' + str(_queue)])
-                            streamblock = self.send_packet(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+                            self.mac_learning(
+                                self.init_flows[
+                                    _direct +
+                                    '_' +
+                                    _vm +
+                                    '_' +
+                                    str(_queue)],
+                                self.init_flows[
+                                    reverse_dict[_direct] +
+                                    '_' +
+                                    _vm +
+                                    '_' +
+                                    str(_queue)])
+                            streamblock = self.send_packet(
+                                self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
                             time.sleep(1)
                             result, thread_info = self.catch_thread_info()
                             thread_info = eval(thread_info)
-                            self.stop_flow(streamblock, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+                            self.stop_flow(
+                                streamblock, self.init_flows[
+                                    _direct + '_' + _vm + '_' + str(_queue)])
                             time.sleep(1)
                             if not result:
                                 print("[ERROR]Catch the thread info failed.")
                                 break
                         except:
-                            print("[ERROR]send flow failed error or get host thread info failed.")
+                            print(
+                                "[ERROR]send flow failed error or get host thread info failed.")
 
                         # compare the got thread info to
-                        if check_dict(thread_info, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)]):
-                            self.set_thread2flow(thread_info, self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
-                            print("[INFO]Flow %s_%s_%s :     fwd_vhost %s    src_recv_irq %s   dst_send_irq %s"
-                                  % (_direct, _vm, _queue, thread_info['fwd_vhost'], thread_info['src_recv_irq'],
-                                     thread_info['dst_send_irq']))
-                            print("%s" % (self.init_flows[_direct + '_' + _vm + '_' + str(_queue)]))
+                        if check_dict(
+                            thread_info, self.init_flows[
+                                _direct + '_' + _vm + '_' + str(_queue)]):
+                            self.set_thread2flow(
+                                thread_info, self.init_flows[
+                                    _direct + '_' + _vm + '_' + str(_queue)])
+                            print(
+                                "[INFO]Flow %s_%s_%s :     fwd_vhost %s    src_recv_irq %s   dst_send_irq %s" %
+                                (_direct,
+                                 _vm,
+                                 _queue,
+                                 thread_info['fwd_vhost'],
+                                    thread_info['src_recv_irq'],
+                                    thread_info['dst_send_irq']))
+                            print(
+                                "%s" %
+                                (self.init_flows[
+                                    _direct +
+                                    '_' +
+                                    _vm +
+                                    '_' +
+                                    str(_queue)]))
                             break
                         else:
-                            dst_ip_update(self.init_flows[_direct + '_' + _vm + '_' + str(_queue)])
+                            dst_ip_update(
+                                self.init_flows[
+                                    _direct +
+                                    '_' +
+                                    _vm +
+                                    '_' +
+                                    str(_queue)])
         return self.init_flows
 
     def affinity_bind(self, aff_strategy):
@@ -361,7 +404,8 @@ class TnV(BaseModel):
         # recognize the thread id
         for flowname in sorted(self.init_flows.keys()):
             tmp_thread = self.init_flows[flowname]['fwd_thread']
-            qemu_other = qemu_other + copy.deepcopy(self.init_flows[flowname]['qemu_thread_list'])
+            qemu_other = qemu_other + \
+                copy.deepcopy(self.init_flows[flowname]['qemu_thread_list'])
             qemu_list.append(tmp_thread)
             if self.init_flows[flowname]['direct'] == 'forward':
                 dst_vhost.append(self.init_flows[flowname]['fwd_vhost'])
@@ -386,16 +430,38 @@ class TnV(BaseModel):
         handle = ConfigParser.ConfigParser()
         handle.read(self.config['strategyfile'])
         try:
-            qemu_numa = handle.get('strategy' + self.config['strategy'], 'qemu_numa')
-            src_vhost_numa = handle.get('strategy' + self.config['strategy'], 'src_vhost_numa')
-            dst_vhost_numa = handle.get('strategy' + self.config['strategy'], 'dst_vhost_numa')
-            src_irq_numa = handle.get('strategy' + self.config['strategy'], 'src_irq_numa')
-            dst_irq_numa = handle.get('strategy' + self.config['strategy'], 'dst_irq_numa')
-            loan_numa = handle.get('strategy' + self.config['strategy'], 'loan_numa')
+            qemu_numa = handle.get(
+                'strategy' +
+                self.config['strategy'],
+                'qemu_numa')
+            src_vhost_numa = handle.get(
+                'strategy' + self.config['strategy'],
+                'src_vhost_numa')
+            dst_vhost_numa = handle.get(
+                'strategy' + self.config['strategy'],
+                'dst_vhost_numa')
+            src_irq_numa = handle.get(
+                'strategy' +
+                self.config['strategy'],
+                'src_irq_numa')
+            dst_irq_numa = handle.get(
+                'strategy' +
+                self.config['strategy'],
+                'dst_irq_numa')
+            loan_numa = handle.get(
+                'strategy' +
+                self.config['strategy'],
+                'loan_numa')
         except:
             print("[ERROR]Parse the strategy file failed or get the options failed.")
 
-        for value in [qemu_numa, src_vhost_numa, dst_vhost_numa, src_irq_numa, dst_irq_numa, loan_numa]:
+        for value in [
+                qemu_numa,
+                src_vhost_numa,
+                dst_vhost_numa,
+                src_irq_numa,
+                dst_irq_numa,
+                loan_numa]:
             if value is not None or value == '':
                 raise ValueError('some option in the strategy file is none.')
         # cores mapping thread
@@ -407,26 +473,39 @@ class TnV(BaseModel):
         for node in numa_topo.keys():
             numa_topo[node]['process'] = []
             if 'node' + src_irq_numa == node:
-                numa_topo[node]['process'] = numa_topo[node]['process'] + src_irq
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'] + src_irq
             if 'node' + dst_irq_numa == node:
-                numa_topo[node]['process'] = numa_topo[node]['process'] + dst_irq
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'] + dst_irq
             if 'node' + src_vhost_numa == node:
-                numa_topo[node]['process'] = numa_topo[node]['process'] + src_vhost
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'] + src_vhost
             if 'node' + dst_vhost_numa == node:
-                numa_topo[node]['process'] = numa_topo[node]['process'] + dst_vhost
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'] + dst_vhost
             if 'node' + qemu_numa == node:
-                numa_topo[node]['process'] = numa_topo[node]['process'] + qemu_list
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'] + qemu_list
         loan_cores = ''
         for node in numa_topo.keys():
-            if len(numa_topo[node]['process']) > len(numa_topo[node]['phy_cores']):
+            if len(
+                    numa_topo[node]['process']) > len(
+                    numa_topo[node]['phy_cores']):
                 # length distance
-                diff = len(numa_topo[node]['process']) - len(numa_topo[node]['phy_cores'])
+                diff = len(numa_topo[node]['process']) - \
+                    len(numa_topo[node]['phy_cores'])
                 # first deep copy
-                numa_topo['node' + loan_numa]['process'] = numa_topo['node' + loan_numa]['process'] + copy.deepcopy(
-                    numa_topo[node]['process'][-diff:])
-                cores_str = _tranfer_array_to_range(numa_topo['node' + loan_numa]['phy_cores'][diff:])
+                numa_topo['node' + loan_numa]['process'] = numa_topo['node' + loan_numa][
+                    'process'] + copy.deepcopy(numa_topo[node]['process'][-diff:])
+                cores_str = _tranfer_array_to_range(
+                    numa_topo[
+                        'node' +
+                        loan_numa]['phy_cores'][
+                        diff:])
                 loan_cores = ','.join([loan_cores, cores_str])
-                numa_topo[node]['process'] = numa_topo[node]['process'][0:-diff]
+                numa_topo[node]['process'] = numa_topo[
+                    node]['process'][0:-diff]
         loan_cores = loan_cores[1:]
         loan_bind_list = {}
         for proc_loan in qemu_other:
@@ -435,7 +514,8 @@ class TnV(BaseModel):
         bind_list = {}
         for node in numa_topo.keys():
             for i in range(len(numa_topo[node]['process'])):
-                bind_list[numa_topo[node]['process'][i]] = str(numa_topo[node]['phy_cores'][i])
+                bind_list[numa_topo[node]['process'][i]] = str(
+                    numa_topo[node]['phy_cores'][i])
         bind_list.update(loan_bind_list)
         for key in bind_list.keys():
             self.host_instance.bind_cpu(bind_list[key], key)
@@ -459,8 +539,10 @@ class TnV(BaseModel):
 
         if suite == "throughput":
             print("[INFO]!!!!!!!!!!!!!!!Now begin to throughput test")
-            ret, result = self.send_instace.run_rfc2544_throughput(forward_init_flows, reverse_init_flows)
+            ret, result = self.send_instace.run_rfc2544_throughput(
+                forward_init_flows, reverse_init_flows)
         elif suite == "frameloss":
             print("[INFO]!!!!!!!!!!!1!!!Now begin to frameloss test")
-            ret, result = self.send_instace.run_rfc2544_frameloss(forward_init_flows, reverse_init_flows)
+            ret, result = self.send_instace.run_rfc2544_frameloss(
+                forward_init_flows, reverse_init_flows)
         return ret, result
index b09a846..9003f06 100644 (file)
@@ -29,9 +29,11 @@ def restrucData(data_string):
     try:
         data_dict = {}
         p = re.compile('-Columns.*-Output')
-        data_dict['Columns'] = p.findall(data_string)[0].strip('-Columns {} -Output')
+        data_dict['Columns'] = p.findall(
+            data_string)[0].strip('-Columns {} -Output')
         p = re.compile('-Output.*-State')
-        data_dict['Output'] = p.findall(data_string)[0].strip('-Output {} -State')
+        data_dict['Output'] = p.findall(
+            data_string)[0].strip('-Output {} -State')
         if data_dict['Columns'] is not None or data_dict['Output'] is not None:
             return False, None
         return True, data_dict
@@ -90,6 +92,7 @@ def framelossData(column, perfdata):
 
 
 class analysis(object):
+
     def __init__(self):
         pass
 
@@ -148,7 +151,8 @@ class analysis(object):
                     line[column_name_dict['FrameSize']],
                     line[column_name_dict['Load(%)']],
                     line[column_name_dict['Result']],
-                    str(float(line[column_name_dict['ForwardingRate(mpps)']]) / 1000000),
+                    str(float(line[column_name_dict[
+                        'ForwardingRate(mpps)']]) / 1000000),
                     line[column_name_dict['TxFrameCount']],
                     line[column_name_dict['RxFrameCount']],
                     line[column_name_dict['AverageLatency(us)']],
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 4a3b02c..a4bd146 100644 (file)
@@ -21,6 +21,7 @@ LOG = logging.getLogger(__name__)
 
 
 class FlowsProducer(object):
+
     def __init__(self, conn, flows_settings):
         self._perf = flows_settings
         self._forwarding = ForwardingSettings().settings
@@ -43,12 +44,13 @@ class FlowsProducer(object):
             raise Exception("error devs :%s", devs)
         LOG.info(agent)
         LOG.info(name)
-        if not self._devs_map.has_key((agent, name)):
+        if (agent, name) not in self._devs_map:
             query = Fabricant(agent, self._conn)
             query.clean_all_namespace()
             dev_info = query.get_device_verbose(identity=name)
             if not isinstance(dev_info, dict):
-                err = "get device detail failed, agent:%s net:%s" % (agent, name)
+                err = "get device detail failed, agent:%s net:%s" % (
+                    agent, name)
                 raise Exception(err)
             dev = {
                 "agent": agent,
@@ -127,7 +129,10 @@ class FlowsProducer(object):
 def unit_test():
     from vstf.rpc_frame_work.rpc_producer import Server
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-producer.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.INFO,
+        log_file="/var/log/vstf/vstf-producer.log",
+        clevel=logging.INFO)
 
     conn = Server("192.168.188.10")
     flow_settings = FlowsSettings()
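
The has_key() change above is more than formatting: it is one of autopep8's --aggressive fixes (pycodestyle W601), replacing the Python 2-only dict.has_key() with the in operator, which gives the same result and also works on Python 3. A small self-contained sketch of the equivalence, using a made-up stand-in for self._devs_map:

    # Hypothetical stand-in for the (agent, name)-keyed device cache.
    devs_map = {("host-a", "eth0"): {"iface": "eth0"}}

    key = ("host-a", "eth1")
    # Old, Python 2-only spelling:  if not devs_map.has_key(key): ...
    # New spelling, same result on Python 2 and Python 3:
    if key not in devs_map:
        devs_map[key] = {}
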
index c49df04..8e3e7b2 100644 (file)
@@ -17,6 +17,7 @@ LOG = logging.getLogger(__name__)
 
 
 class NetDeviceMgr(Fabricant):
+
     @classmethod
     def add(cls, dst, conn, dev):
         self = cls(dst, conn)
@@ -38,6 +39,7 @@ class NetDeviceMgr(Fabricant):
 
 
 class Actor(Fabricant):
+
     def __init__(self, dst, conn, tool, params):
         super(Actor, self).__init__(dst, conn)
         self._tool = tool
@@ -46,12 +48,13 @@ class Actor(Fabricant):
 
     def __repr__(self):
         repr_dict = self.__dict__
-        repr_keys = list(repr_dict.keys())
-        repr_keys.sort()
-        return '%s(%s)' % (self.__class__.__name__, ', '.join(['%s=%r' % (k, repr_dict[k]) for k in repr_keys]))
+        repr_keys = sorted(repr_dict.keys())
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(
+            ['%s=%r' % (k, repr_dict[k]) for k in repr_keys]))
 
 
 class Sender(Actor):
+
     def start(self, pktsize, **kwargs):
         LOG.info("Sender.start")
         if 'ratep' in kwargs and kwargs['ratep']:
@@ -106,6 +109,7 @@ class Sender(Actor):
 
 
 class Receiver(Actor):
+
     def start(self, **kwargs):
         LOG.info("Receiver.start")
         ret, info = self.perf_run(
@@ -136,6 +140,7 @@ class Receiver(Actor):
 
 
 class NicWatcher(Fabricant):
+
     def __init__(self, dst, conn, params):
         super(NicWatcher, self).__init__(dst, conn)
         self._params = params
@@ -144,7 +149,9 @@ class NicWatcher(Fabricant):
 
     def start(self):
         print "NicWatcher.start"
-        self._pid = self.run_vnstat(device=self._params["iface"], namespace=self._params["namespace"])
+        self._pid = self.run_vnstat(
+            device=self._params["iface"],
+            namespace=self._params["namespace"])
         print self._pid
 
     def stop(self):
@@ -161,6 +168,7 @@ class NicWatcher(Fabricant):
 
 
 class CpuWatcher(Fabricant):
+
     def __init__(self, dst, conn):
         super(CpuWatcher, self).__init__(dst, conn)
         self._pid = None
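
The __repr__ hunk in this file folds the two-step list()/sort() idiom into the sorted() builtin; the key order, and therefore the generated repr string, is unchanged. A short sketch with a throwaway class (the names are placeholders, not part of vstf):

    class Demo(object):
        def __init__(self):
            self.beta = 2
            self.alpha = 1

    repr_dict = Demo().__dict__

    # Before: build a list, then sort it in place.
    keys_old = list(repr_dict.keys())
    keys_old.sort()

    # After: a single expression, no intermediate mutation.
    keys_new = sorted(repr_dict.keys())

    assert keys_old == keys_new == ['alpha', 'beta']
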
index 396e6ee..3fdbad6 100644 (file)
@@ -26,6 +26,7 @@ def get_agent_dict(nodes):
 
 
 class PerfProvider(object):
+
     def __init__(self, flows_info, tool_info, tester_info):
         self._flows_info = flows_info
         self._tool_info = tool_info
@@ -33,7 +34,8 @@ class PerfProvider(object):
 
     def _islation(self):
         flows = self._flows_info["flows"]
-        if flows == 2 and self._flows_info["senders"][0]["agent"] == self._flows_info["senders"][1]["agent"]:
+        if flows == 2 and self._flows_info["senders"][0][
+                "agent"] == self._flows_info["senders"][1]["agent"]:
             return True
         return False
 
@@ -53,8 +55,10 @@ class PerfProvider(object):
                 }
             }
             for i in range(flows):
-                sender['params']['src'].append(self._flows_info["senders"][i]['dev'])
-                sender['params']['dst'].append(self._flows_info["receivers"][i]['dev'])
+                sender['params']['src'].append(
+                    self._flows_info["senders"][i]['dev'])
+                sender['params']['dst'].append(
+                    self._flows_info["receivers"][i]['dev'])
             result.append(sender)
         else:
             for i in range(flows):
@@ -63,12 +67,12 @@ class PerfProvider(object):
                     "params": {
                         "protocol": protocol,
                         "namespace": None if "netmap" == tool else self._flows_info["senders"][i]['dev']['namespace'],
                     "params": {
                         "protocol": protocol,
                         "namespace": None if "netmap" == tool else self._flows_info["senders"][i]['dev']['namespace'],
-                        "src": [self._flows_info["senders"][i]['dev']],
-                        "dst": [self._flows_info["receivers"][i]['dev']],
+                        "src": [
+                            self._flows_info["senders"][i]['dev']],
+                        "dst": [
+                            self._flows_info["receivers"][i]['dev']],
                         "time": self._tool_info[tool]["time"],
                         "time": self._tool_info[tool]["time"],
-                        "threads": self._tool_info[tool]["threads"]
-                    }
-                }
+                        "threads": self._tool_info[tool]["threads"]}}
                 result.append(sender)
         return result
 
@@ -91,9 +95,8 @@ class PerfProvider(object):
                     "params": {
                         "namespace": None if "netmap" == tool else self._flows_info["receivers"][i]['dev']['namespace'],
                         "protocol": protocol,
                     "params": {
                         "namespace": None if "netmap" == tool else self._flows_info["receivers"][i]['dev']['namespace'],
                         "protocol": protocol,
-                        "dst": [self._flows_info["receivers"][i]['dev']]
-                    }
-                }
+                        "dst": [
+                            self._flows_info["receivers"][i]['dev']]}}
                 result.append(receiver)
         return result
 
@@ -104,9 +107,10 @@ class PerfProvider(object):
                 "agent": watcher["agent"],
                 "params": {
                     "iface": watcher['dev']["iface"],
                 "agent": watcher["agent"],
                 "params": {
                     "iface": watcher['dev']["iface"],
-                    "namespace": None if tool in ["pktgen", "netmap"] else watcher['dev']["namespace"],
-                }
-            }
+                    "namespace": None if tool in [
+                        "pktgen",
+                        "netmap"] else watcher['dev']["namespace"],
+                }}
             result.append(node)
         return result
 
@@ -118,10 +122,12 @@ class PerfProvider(object):
                 "agent": watcher["agent"],
                 "params": {
                     "iface": watcher['dev']["iface"],
                 "agent": watcher["agent"],
                 "params": {
                     "iface": watcher['dev']["iface"],
-                    "namespace": watcher['dev']["namespace"] if tool not in ["pktgen", "netmap"] else None,
-                    "ip": watcher['dev']["ip"] + '/24',
-                }
-            }
+                    "namespace": watcher['dev']["namespace"] if tool not in [
+                        "pktgen",
+                        "netmap"] else None,
+                    "ip": watcher['dev']["ip"] +
+                    '/24',
+                }}
             result.append(node)
         return result
 
@@ -176,7 +182,10 @@ class PerfProvider(object):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-perf-provider.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-perf-provider.log",
+        clevel=logging.INFO)
 
     from vstf.controller.settings.flows_settings import FlowsSettings
     from vstf.controller.settings.tool_settings import ToolSettings
@@ -186,7 +195,10 @@ def unit_test():
     tool_settings = ToolSettings()
     tester_settings = TesterSettings()
 
-    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+    provider = PerfProvider(
+        flows_settings.settings,
+        tool_settings.settings,
+        tester_settings.settings)
 
     tools = ['pktgen']
     protocols = ['udp_bw', 'udp_lat']
index 3fe91e9..7dc426f 100644 (file)
@@ -29,6 +29,7 @@ LOG = logging.getLogger(__name__)
 
 
 class Performance(object):
+
     def __init__(self, conn, provider):
         self._provider = provider
         self._conn = conn
@@ -221,7 +222,8 @@ class Performance(object):
                 lat_tool = "qperf"
                 lat_type = 'latency'
                 lat_tpro = protocol + '_lat'
-                self.run_latency_test(lat_tool, lat_tpro, size, ratep=realspeed)
+                self.run_latency_test(
+                    lat_tool, lat_tpro, size, ratep=realspeed)
                 lat_result = self.result(tool, lat_type)
                 LOG.info(bw_result)
                 LOG.info(lat_result)
@@ -272,23 +274,32 @@ class Performance(object):
                 record[mark.txMbps] += nic_data['txmB/s'] * 8
 
             if record[mark.rxMbps] > record[mark.txMbps]:
-                record[mark.rxMbps], record[mark.txMbps] = record[mark.txMbps], record[mark.rxMbps]
+                record[
+                    mark.rxMbps], record[
+                    mark.txMbps] = record[
+                    mark.txMbps], record[
+                    mark.rxMbps]
 
             if record[mark.rxCount] > record[mark.txCount]:
-                record[mark.rxCount], record[mark.txCount] = record[mark.txCount], record[mark.rxCount]
+                record[
+                    mark.rxCount], record[
+                    mark.txCount] = record[
+                    mark.txCount], record[
+                    mark.rxCount]
 
             if record[mark.txCount]:
-                record[mark.percentLoss] = round(100 * (1 - record[mark.rxCount] / record[mark.txCount]),
-                                                 cst.PKTLOSS_ROUND)
+                record[mark.percentLoss] = round(
+                    100 * (1 - record[mark.rxCount] / record[mark.txCount]), cst.PKTLOSS_ROUND)
             else:
                 record[mark.percentLoss] = 100
 
             record[mark.bandwidth] /= 1000000.0
             if cpu_mhz and record[mark.cpu]:
-                record[mark.mppsGhz] = round(record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000),
-                                             cst.CPU_USAGE_ROUND)
+                record[mark.mppsGhz] = round(
+                    record[mark.bandwidth] / (record[mark.cpu] * cpu_mhz / 100000), cst.CPU_USAGE_ROUND)
 
 
-            record[mark.bandwidth] = round(record[mark.bandwidth], cst.RATEP_ROUND)
+            record[mark.bandwidth] = round(
+                record[mark.bandwidth], cst.RATEP_ROUND)
 
         elif ttype in {'latency'}:
             record = {
@@ -319,7 +330,10 @@ class Performance(object):
 
 def unit_test():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-sw_perf.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-sw_perf.log",
+        clevel=logging.INFO)
 
     conn = Server("192.168.188.10")
     perf_settings = PerfSettings()
@@ -327,7 +341,10 @@ def unit_test():
     tool_settings = ToolSettings()
     tester_settings = TesterSettings()
     flow_producer = FlowsProducer(conn, flows_settings)
-    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+    provider = PerfProvider(
+        flows_settings.settings,
+        tool_settings.settings,
+        tester_settings.settings)
     perf = Performance(conn, provider)
     tests = perf_settings.settings
     for scenario, cases in tests.items():
@@ -348,7 +365,10 @@ def unit_test():
 
 def main():
     from vstf.common.log import setup_logging
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-performance.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-performance.log",
+        clevel=logging.INFO)
     from vstf.controller.database.dbinterface import DbManage
     parser = argparse.ArgumentParser(add_help=True)
     parser.add_argument("case",
@@ -374,9 +394,10 @@ def main():
                         action="store",
                         default="64",
                         help='test size list "64 128"')
-    parser.add_argument("--affctl",
-                        action="store_true",
-                        help="when input '--affctl', the performance will do affctl before testing")
+    parser.add_argument(
+        "--affctl",
+        action="store_true",
+        help="when input '--affctl', the performance will do affctl before testing")
     parser.add_argument("--monitor",
                         dest="monitor",
                         default="localhost",
@@ -399,7 +420,10 @@ def main():
     tool_settings = ToolSettings()
     tester_settings = TesterSettings()
     flow_producer = FlowsProducer(conn, flows_settings)
-    provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+    provider = PerfProvider(
+        flows_settings.settings,
+        tool_settings.settings,
+        tester_settings.settings)
     perf = Performance(conn, provider)
     scenario = db_mgr.query_scenario(casetag)
     flow_producer.create(scenario, casetag)
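
The most awkward-looking rewrite in this file is the swap of record[mark.rxMbps] and record[mark.txMbps]: autopep8 breaks the assignment targets across continuation lines to stay under 79 columns, but it is still a single tuple assignment, so the right-hand side is evaluated completely before either element is stored and the swap semantics are untouched. A compact illustration with placeholder variables:

    # Placeholders for record[mark.rxMbps] and record[mark.txMbps].
    rx_mbps, tx_mbps = 940.0, 120.0

    if rx_mbps > tx_mbps:
        # Same swap as in the hunk, without the wrapping noise.
        rx_mbps, tx_mbps = tx_mbps, rx_mbps

    assert (rx_mbps, tx_mbps) == (120.0, 940.0)
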
index aefb863..828981d 100644 (file)
@@ -15,6 +15,7 @@ LOG = logging.getLogger(__name__)
 
 
 class RawDataProcess(object):
+
     def __init__(self):
         pass
 
@@ -24,7 +25,9 @@ class RawDataProcess(object):
         buf = ' '.join(buf)
         m = {}
         digits = re.compile(r"\d{1,}\.?\d*")
-        units = re.compile(r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)", re.IGNORECASE | re.MULTILINE)
+        units = re.compile(
+            r"(?:gib|mib|kib|kbit/s|gbit/s|mbit/s|p/s)",
+            re.IGNORECASE | re.MULTILINE)
         units_arr = units.findall(buf)
         LOG.debug(units_arr)
         digits_arr = digits.findall(buf)
@@ -96,9 +99,9 @@ class RawDataProcess(object):
             m = self.process_vnstat(data)
         if tool == 'sar' and data_type == 'cpu':
             m = self.process_sar_cpu(data)
-            if raw.has_key('cpu_num'):
+            if 'cpu_num' in raw:
                 m['cpu_num'] = raw['cpu_num']
-            if raw.has_key('cpu_mhz'):
+            if 'cpu_mhz' in raw:
                 m['cpu_mhz'] = raw['cpu_mhz']
         if tool == 'qperf':
             m = self.process_qperf(data)
@@ -121,7 +124,10 @@ if __name__ == '__main__':
     print p.process_vnstat(data)
 
     cmd = "sar -u 2"
-    child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    child = subprocess.Popen(
+        cmd.split(),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE)
     import time
     import os
     from signal import SIGINT
index df7d24d..83b8d15 100644 (file)
@@ -6,4 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
index 6312efa..5131e8d 100644 (file)
@@ -18,4 +18,4 @@ source_repo = {
     "passwd": "root",
     "ip": "192.168.188.10",
     "user": "root"
     "passwd": "root",
     "ip": "192.168.188.10",
     "user": "root"
-}
\ No newline at end of file
+}
index 4c38973..b6e37ff 100644 (file)
@@ -28,4 +28,4 @@ class Test(unittest.TestCase):
 
 if __name__ == "__main__":
     #import sys;sys.argv = ['', 'Test.testName']
-    unittest.main()
\ No newline at end of file
+    unittest.main()
index a08607b..a0cf2a3 100644 (file)
@@ -15,6 +15,7 @@ from vstf.controller.env_build.cfg_intent_parse import IntentParser
 
 
 class Test(model.Test):
+
     def setUp(self):
         super(Test, self).setUp()
         self.dir = os.path.dirname(__file__)
@@ -33,4 +34,4 @@ if __name__ == "__main__":
     import logging
 
     logging.basicConfig(level=logging.INFO)
-    unittest.main()
\ No newline at end of file
+    unittest.main()
index 3e84d01..e8e9dd8 100644 (file)
@@ -15,31 +15,41 @@ from vstf.controller.unittest import model
 
 
 class TestCollect(model.Test):
-    
+
     def setUp(self):
         super(TestCollect, self).setUp()
         self.obj = env_collect.EnvCollectApi(self.conn)
-        
+
     def test_collect_host_info(self):
-        ret_str = json.dumps(self.obj.collect_host_info(self.tester_host), indent = 4)
-        for key in ("CPU INFO","MEMORY INFO","HW_INFO","OS INFO"):
-            self.assertTrue(key in ret_str, "collect_host_info failed, ret_str = %s" % ret_str)
-            
+        ret_str = json.dumps(
+            self.obj.collect_host_info(
+                self.tester_host), indent=4)
+        for key in ("CPU INFO", "MEMORY INFO", "HW_INFO", "OS INFO"):
+            self.assertTrue(
+                key in ret_str,
+                "collect_host_info failed, ret_str = %s" %
+                ret_str)
+
     def test_list_nic_devices(self):
-        ret_str = json.dumps(self.obj.list_nic_devices(self.tester_host), indent = 4)
-        for key in ("device","mac","bdf","desc"):
-            self.assertTrue(key in ret_str, "list_nic_devices failed, ret_str = %s" % ret_str)
+        ret_str = json.dumps(
+            self.obj.list_nic_devices(
+                self.tester_host), indent=4)
+        for key in ("device", "mac", "bdf", "desc"):
+            self.assertTrue(
+                key in ret_str,
+                "list_nic_devices failed, ret_str = %s" %
+                ret_str)
         print ret_str
-    
+
     def test_get_device_detail(self):
         identity = "01:00.0"
         ret = self.obj.get_device_detail(self.tester_host, "01:00.0")
-        for key in ("device","mac","bdf","desc"):
+        for key in ("device", "mac", "bdf", "desc"):
             self.assertTrue(key in ret)
         self.assertTrue(ret['bdf'] == identity)
 
 
 if __name__ == "__main__":
     import logging
-    logging.basicConfig(level = logging.INFO)
-    unittest.main()
\ No newline at end of file
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
index 8d45c7b..a8b1b01 100644 (file)
@@ -14,19 +14,22 @@ from vstf.controller.functiontest.driver.drivertest import config_setup
 from vstf.controller.unittest import model
 
 
-class TestDriverFunction(model.Test):   
+class TestDriverFunction(model.Test):
+
     def setUp(self):
         logging.info("start driver function test unit test.")
-        
+
     def test_config_setup(self):
-        config ,_ = config_setup()
-        for key in ("test_scene","bond_flag","switch_module"):
-            self.assertTrue(key in config.keys(), "config_setup function failure.")
+        config, _ = config_setup()
+        for key in ("test_scene", "bond_flag", "switch_module"):
+            self.assertTrue(
+                key in config.keys(),
+                "config_setup function failure.")
 
     def teardown(self):
         logging.info("stop driver function test unit test.")
 
 if __name__ == "__main__":
     import logging
-    logging.basicConfig(level = logging.INFO)
-    unittest.main()
\ No newline at end of file
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
index e4529e4..5f9d047 100644 (file)
@@ -15,45 +15,46 @@ from vstf.controller.env_build import env_build
 
 
 class TestEnvBuilder(model.Test):
+
     def setUp(self):
         super(TestEnvBuilder, self).setUp()
         self.dir = os.path.dirname(__file__)
-        
+
     @unittest.skip('for now')
     def test_build_tn(self):
-        filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tn.json')
+        filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tn.json')
         self.mgr = env_build.EnvBuildApi(self.conn, filepath)
         ret = self.mgr.build()
         self.assertTrue(ret, "build_tn failed,ret = %s" % ret)
-        
+
     @unittest.skip('for now')
     def test_build_tn1v(self):
-        filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tnv.json')
+        filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tnv.json')
         self.mgr = env_build.EnvBuildApi(self.conn, filepath)
         ret = self.mgr.build()
         self.assertTrue(ret, "build_tn1v failed,ret = %s" % ret)
-        
+
     @unittest.skip('for now')
     def test_build_ti(self):
-        filepath = os.path.join(self.dir,'../../../etc/vstf/env/Ti.json')
+        filepath = os.path.join(self.dir, '../../../etc/vstf/env/Ti.json')
         self.mgr = env_build.EnvBuildApi(self.conn, filepath)
         ret = self.mgr.build()
         self.assertTrue(ret, "build_ti failed,ret = %s" % ret)
-        
+
     @unittest.skip('for now')
     def test_build_tu(self):
-        filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tu.json')
+        filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tu.json')
         self.mgr = env_build.EnvBuildApi(self.conn, filepath)
         ret = self.mgr.build()
         self.assertTrue(ret, "build_tu failed,ret = %s" % ret)
-    
+
     def test_build_tu_bridge(self):
-        filepath = os.path.join(self.dir,'../../../etc/vstf/env/Tu_br.json')
+        filepath = os.path.join(self.dir, '../../../etc/vstf/env/Tu_br.json')
         self.mgr = env_build.EnvBuildApi(self.conn, filepath)
         ret = self.mgr.build()
         self.assertTrue(ret, "build_tu failed,ret = %s" % ret)
-           
+
 if __name__ == "__main__":
     import logging
-    logging.basicConfig(level = logging.INFO)
-    unittest.main()
\ No newline at end of file
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
index 0258ab6..4e2a2ea 100644 (file)
@@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__)
 
 
 class TestPerf(model.Test):
-    
+
     def setUp(self):
         LOG.info("start performance unit test.")
         super(TestPerf, self).setUp()
@@ -48,7 +48,10 @@ class TestPerf(model.Test):
         tool_settings = ToolSettings(path=self.base_path)
         tester_settings = TesterSettings(path=self.base_path)
         flow_producer = FlowsProducer(self.conn, flows_settings)
-        provider = PerfProvider(flows_settings.settings, tool_settings.settings, tester_settings.settings)
+        provider = PerfProvider(
+            flows_settings.settings,
+            tool_settings.settings,
+            tester_settings.settings)
         perf = pf.Performance(self.conn, provider)
         tests = perf_settings.settings
         for scenario, cases in tests.items():
@@ -120,5 +123,8 @@ class TestPerf(model.Test):
 
 
 if __name__ == "__main__":
-    setup_logging(level=logging.INFO, log_file="/var/log/vstf/vstf-unit-test.log", clevel=logging.INFO)
-    unittest.main()
\ No newline at end of file
+    setup_logging(
+        level=logging.INFO,
+        log_file="/var/log/vstf/vstf-unit-test.log",
+        clevel=logging.INFO)
+    unittest.main()
index f240355..c9aadb0 100644 (file)
@@ -21,16 +21,14 @@ class Test(model.Test):
         self.user = self.source_repo["user"]
         self.passwd = self.source_repo["passwd"]
 
-
     def tearDown(self):
         super(Test, self).tearDown()
 
-
     def test_run_cmd(self):
         ssh.run_cmd(self.host, self.user, self.passwd, 'ls')
 
 
 if __name__ == "__main__":
     import logging
-    logging.basicConfig(level = logging.INFO)
-    unittest.main()
\ No newline at end of file
+    logging.basicConfig(level=logging.INFO)
+    unittest.main()
index 86641e7..1546b47 100644 (file)
@@ -36,7 +36,8 @@ def make_msg(method, **kwargs):
     return {"method": method, "args": kwargs}
 
 
-@cliutil.arg("--host", dest="host", default="", action="store", help="list nic devices of specified host")
+@cliutil.arg("--host", dest="host", default="", action="store",
+             help="list nic devices of specified host")
 def do_list_devs(args):
     """List the host's all netdev."""
     ret = call(make_msg("list_devs", host=args.host))
@@ -51,19 +52,44 @@ def do_src_install(args):
     """work agent to pull source code and compile.
     use git as underlying mechanism, please make sure the host has access to git repo.
     """
-    ret = call(make_msg("src_install", host=args.host, config_file=args.config_file))
+    ret = call(
+        make_msg(
+            "src_install",
+            host=args.host,
+            config_file=args.config_file))
     print_stdout(ret)
 
 
-@cliutil.arg("--host", dest="host", action="store", default=None,
-             help="which host to build, must exists in your config file, use default[None] value to build all hosts.")
-@cliutil.arg("--model", dest="model", action="store", choices=('Tn', 'Ti', 'Tu', 'Tnv'),
-             help="which model to build, if specified, the according config file /etc/vstf/env/{model}.json must exist.")
-@cliutil.arg("--config_file", dest="config_file", action="store", default=None,
-             help="if specified, the config file will replace the default config file from /etc/vstf/env.")
+@cliutil.arg(
+    "--host",
+    dest="host",
+    action="store",
+    default=None,
+    help="which host to build, must exists in your config file, use default[None] value to build all hosts.")
+@cliutil.arg(
+    "--model",
+    dest="model",
+    action="store",
+    choices=(
+        'Tn',
+        'Ti',
+        'Tu',
+        'Tnv'),
+    help="which model to build, if specified, the according config file /etc/vstf/env/{model}.json must exist.")
+@cliutil.arg(
+    "--config_file",
+    dest="config_file",
+    action="store",
+    default=None,
+    help="if specified, the config file will replace the default config file from /etc/vstf/env.")
 def do_apply_model(args):
     """Apply model to the host."""
-    ret = call(make_msg("apply_model", host=args.host, model=args.model, config_file=args.config_file))
+    ret = call(
+        make_msg(
+            "apply_model",
+            host=args.host,
+            model=args.model,
+            config_file=args.config_file))
     print_stdout(ret)
 
 
@@ -73,7 +99,11 @@ def do_apply_model(args):
              help="configuration file for image creation.")
 def do_create_images(args):
     """create images on host, images are configed by configuration file."""
-    ret = call(make_msg("create_images", host=args.host, config_file=args.config_file))
+    ret = call(
+        make_msg(
+            "create_images",
+            host=args.host,
+            config_file=args.config_file))
     print_stdout(ret)
 
 
@@ -83,23 +113,49 @@ def do_create_images(args):
              help="configuration file for images.")
 def do_clean_images(args):
     """clean images on host, images are configed by configuration file."""
-    ret = call(make_msg("clean_images", host=args.host, config_file=args.config_file))
+    ret = call(
+        make_msg(
+            "clean_images",
+            host=args.host,
+            config_file=args.config_file))
     print_stdout(ret)
 
 
-@cliutil.arg("--host", dest="host", action="store", default=None,
-             help="which host to clean, must exists in your config file, use default[None] value to clean all hosts.")
-@cliutil.arg("--model", dest="model", action="store", choices=('Tn', 'Ti', 'Tu', 'Tnv'),
-             help="if specified, the according config file /etc/vstf/env/{model}.json must exist.")
-@cliutil.arg("--config_file", dest="config_file", action="store", default=None,
-             help="if specified, the config file will replace the default config file from /etc/vstf/env.")
+@cliutil.arg(
+    "--host",
+    dest="host",
+    action="store",
+    default=None,
+    help="which host to clean, must exists in your config file, use default[None] value to clean all hosts.")
+@cliutil.arg(
+    "--model",
+    dest="model",
+    action="store",
+    choices=(
+        'Tn',
+        'Ti',
+        'Tu',
+        'Tnv'),
+    help="if specified, the according config file /etc/vstf/env/{model}.json must exist.")
+@cliutil.arg(
+    "--config_file",
+    dest="config_file",
+    action="store",
+    default=None,
+    help="if specified, the config file will replace the default config file from /etc/vstf/env.")
 def do_disapply_model(args):
     """Apply model to the host."""
-    ret = call(make_msg("disapply_model", host=args.host, model=args.model, config_file=args.config_file))
+    ret = call(
+        make_msg(
+            "disapply_model",
+            host=args.host,
+            model=args.model,
+            config_file=args.config_file))
     print_stdout(ret)
 
 
-@cliutil.arg("--host", dest="host", action="store", help="collect host information about cpu/mem etc")
+@cliutil.arg("--host", dest="host", action="store",
+             help="collect host information about cpu/mem etc")
 def do_collect_host_info(args):
     """Show the host's CPU/MEN info"""
     ret = call(make_msg("collect_host_info", target=args.host))
@@ -113,12 +169,22 @@ def do_show_tasks(args):
     print_stdout(ret)
 
 
-@cliutil.arg("case", action="store", help="test case like Ti-1, Tn-1, Tnv-1, Tu-1, see case definition in documents")
+@cliutil.arg(
+    "case",
+    action="store",
+    help="test case like Ti-1, Tn-1, Tnv-1, Tu-1, see case definition in documents")
 @cliutil.arg("tool", action="store", choices=cst.TOOLS)
 @cliutil.arg("protocol", action="store", choices=cst.TPROTOCOLS)
 @cliutil.arg("type", action="store", choices=cst.TTYPES)
-@cliutil.arg("sizes", action="store", default="64", help='test size list "64 128"')
-@cliutil.arg("--affctl", action="store_true", help="when affctl is True, it will do affctl before testing")
+@cliutil.arg(
+    "sizes",
+    action="store",
+    default="64",
+    help='test size list "64 128"')
+@cliutil.arg(
+    "--affctl",
+    action="store_true",
+    help="when affctl is True, it will do affctl before testing")
 def do_perf_test(args):
     """Runs a quick single software performance test without envbuild and generating reports.
     Outputs the result to the stdout immediately."""
@@ -270,7 +336,10 @@ def main():
     args = parser.parse_args()
     if args.func is None:
         sys.exit(-1)
-    setup_logging(level=logging.DEBUG, log_file="/var/log/vstf/vstf-adm.log", clevel=logging.INFO)
+    setup_logging(
+        level=logging.DEBUG,
+        log_file="/var/log/vstf/vstf-adm.log",
+        clevel=logging.INFO)
     # connect to manage
     global CONN
     try:
index 547db68..83b8d15 100644 (file)
@@ -6,5 +6,3 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-
-
index fb54e5d..049b86f 100644 (file)
@@ -57,7 +57,8 @@ class VstfConsumer(object):
         self.srv = host
         self.port = port
         self.agent_id = agent_id
-        self.url = 'amqp://' + self.user + ':' + self.passwd + '@' + self.srv + ':' + self.port + '/%2F'
+        self.url = 'amqp://' + self.user + ':' + self.passwd + \
+            '@' + self.srv + ':' + self.port + '/%2F'
 
         # load the agent_funcs
         try:
@@ -122,8 +123,10 @@ class VstfConsumer(object):
         if self._closing:
             self._connection.ioloop.stop()
         else:
-            LOGGER.warning('Connection closed, reopening in 2 seconds: (%s) %s',
-                           reply_code, reply_text)
+            LOGGER.warning(
+                'Connection closed, reopening in 2 seconds: (%s) %s',
+                reply_code,
+                reply_text)
             self._connection.add_timeout(2, self.reconnect)
 
     def reconnect(self):
@@ -206,7 +209,10 @@ class VstfConsumer(object):
         :param str|unicode exchange_name: The name of the exchange to declare
 
         """
-        LOGGER.info('Declaring %s exchange %s', constant.DIRECT, constant.exchange_d)
+        LOGGER.info(
+            'Declaring %s exchange %s',
+            constant.DIRECT,
+            constant.exchange_d)
         self._channel.exchange_declare(self.on_direct_exchange_declareok,
                                        constant.exchange_d,
                                        constant.DIRECT)
@@ -342,14 +348,15 @@ class VstfConsumer(object):
                                       'args': e.args}}
         finally:
             response = message.add_context(response, **head)
-            LOGGER.debug("response the msg: head:%(h)s, body:%(b)s",
-                         {'h': response.get('head'), 'b': response.get('body')})
-
-            respone_chanl.basic_publish(exchange=constant.exchange_d,
-                                        routing_key=properties.reply_to,
-                                        properties=pika.BasicProperties(correlation_id=properties.correlation_id),
-                                        body=message.encode(response)
-                                        )
+            LOGGER.debug("response the msg: head:%(h)s, body:%(b)s", {
+                         'h': response.get('head'), 'b': response.get('body')})
+
+            respone_chanl.basic_publish(
+                exchange=constant.exchange_d,
+                routing_key=properties.reply_to,
+                properties=pika.BasicProperties(
+                    correlation_id=properties.correlation_id),
+                body=message.encode(response))
             # no matter what happend, tell the mq-server to drop this msg.
 
         self.acknowledge_message(basic_deliver.delivery_tag)
index abf2a7f..cb72b45 100644 (file)
@@ -22,6 +22,7 @@ LOG = logging.getLogger(__name__)
 
 
 class RpcProxy(object):
+
     def __init__(self, host,
                  user='guest',
                  passwd='guest',
@@ -39,7 +40,8 @@ class RpcProxy(object):
         self.passwd = passwd
         self.srv = host
         self.port = port
-        self.url = 'amqp://' + self.user + ':' + self.passwd + '@' + self.srv + ':' + self.port + '/%2F'
+        self.url = 'amqp://' + self.user + ':' + self.passwd + \
+            '@' + self.srv + ':' + self.port + '/%2F'
         try:
             self.connect(host, self.setup_vstf_producer)
         except Exception as e:
@@ -51,13 +53,14 @@ class RpcProxy(object):
 
     def connect(self, host, ok_callback):
         """Create a Blocking connection to the rabbitmq-server
-        
+
         :param str    host: the rabbitmq-server's host
         :param obj    ok_callback: if connect success than do this function
-        
+
         """
         LOG.info("Connect to the server %s", host)
-        self._connection = pika.BlockingConnection(pika.URLParameters(self.url))
+        self._connection = pika.BlockingConnection(
+            pika.URLParameters(self.url))
         if self._connection:
             ok_callback()
 
@@ -80,7 +83,9 @@ class RpcProxy(object):
         LOG.info("Declare queue %s and bind it to exchange %s",
                  self._queue, constant.exchange_d)
         self._channel.queue_declare(queue=self._queue, exclusive=True)
-        self._channel.queue_bind(exchange=constant.exchange_d, queue=self._queue)
+        self._channel.queue_bind(
+            exchange=constant.exchange_d,
+            queue=self._queue)
 
     def start_consumer(self):
         LOG.info("Start response consumer")
@@ -121,8 +126,8 @@ class RpcProxy(object):
         self.response = None
         if self.corr_id == props.correlation_id:
             self.response = json.loads(body)
-            LOG.debug("Proxy producer reciver the msg: head:%(h)s, body:%(b)s",
-                      {'h': self.response.get('head'), 'b': self.response.get('body')})
+            LOG.debug("Proxy producer reciver the msg: head:%(h)s, body:%(b)s", {
+                      'h': self.response.get('head'), 'b': self.response.get('body')})
         else:
             LOG.warn("Proxy producer Drop the msg "
                      "because of the wrong correlation id, %s\n" % body)
@@ -130,8 +135,11 @@ class RpcProxy(object):
     def publish(self, target, corrid, body):
         properties = pika.BasicProperties(reply_to=self._queue,
                                           correlation_id=corrid)
-        LOG.debug("start to publish message to the exchange=%s, target=%s, msg=%s"
-                  , constant.exchange_d, target, body)
+        LOG.debug(
+            "start to publish message to the exchange=%s, target=%s, msg=%s",
+            constant.exchange_d,
+            target,
+            body)
         return self._channel.basic_publish(exchange=constant.exchange_d,
                                            routing_key=target,
                                            mandatory=True,
@@ -149,7 +157,7 @@ class RpcProxy(object):
         queue = constant.queue_common + target
         # the msg request and respone must be match by corr_id
         self.corr_id = str(uuid.uuid4())
-        # same msg format 
+        # same msg format
         msg = message.add_context(msg, corrid=self.corr_id)
 
         # send msg to the queue
@@ -182,7 +190,7 @@ class RpcProxy(object):
         # deal with exceptions
         if msg_body \
                 and isinstance(msg_body, dict) \
-                and msg_body.has_key('exception'):
+                and 'exception' in msg_body:
             ename = str(msg_body['exception'].get('name'))
             if hasattr(exceptions, ename):
                 e = getattr(exceptions, ename)()
@@ -199,6 +207,7 @@ class RpcProxy(object):
 
 
 class Server(object):
+
     def __init__(self, host=None,
                  user='guest',
                  passwd='guest',
@@ -206,7 +215,8 @@ class Server(object):
         super(Server, self).__init__()
         # Default use salt's master ip as rabbit rpc server ip
         if host is None:
-            raise Exception("Can not create rpc proxy because of the None rabbitmq server address.")
+            raise Exception(
+                "Can not create rpc proxy because of the None rabbitmq server address.")
 
         self.host = host
         self.port = port
index d4b2ccf..e71a1e8 100644 (file)
@@ -13,6 +13,7 @@ import sys
 from rubbos_collector import RubbosCollector
 from uploader import Uploader
 
+
 def printUsage():
     print "Usage: python process_data.py required_params(**) optional_params([])"
     print "       ** -i|--input   input_data_dir"
@@ -21,12 +22,14 @@ def printUsage():
     print "       [] -o|--output  output_file"
     print "       [] -u|--upload  yes|no"
 
+
 def process(input_dir, suite_name):
     result = dict()
     if suite_name == "rubbos":
         result = RubbosCollector().collect_data(input_dir)
     return result
 
+
 def writeResult(output_file, result):
     f = open(output_file, "w")
     if isinstance(result, list):
@@ -34,41 +37,43 @@ def writeResult(output_file, result):
             f.write(str(elem) + "\n")
     f.close()
 
+
 def uploadResult(conf, suite_name, result):
     Uploader(conf).upload_result(suite_name, result)
 
+
 def main():
     if len(sys.argv) < 7 or len(sys.argv) % 2 == 0:
         printUsage()
-        exit (1)
+        exit(1)
     i = 1
     params = dict()
     while (i < len(sys.argv)):
-        if sys.argv[i]=="-i" or sys.argv[i]=="--input":
-            params["input"] = sys.argv[i+1]
-        if sys.argv[i]=="-s" or sys.argv[i]=="--suite":
-            params["suite"] = sys.argv[i+1]
-        if sys.argv[i]=="-c" or sys.argv[i]=="--conf":
-            params["conf"] = sys.argv[i+1]
-        if sys.argv[i]=="-o" or sys.argv[i]=="--output":
-            params["output"] = sys.argv[i+1]
-        if sys.argv[i]=="-u" or sys.argv[i]=="--upload":
-            params["upload"] = sys.argv[i+1]
-        i = i+2
-    if not(params.has_key("input") and params.has_key("suite") and params.has_key("conf")):
+        if sys.argv[i] == "-i" or sys.argv[i] == "--input":
+            params["input"] = sys.argv[i + 1]
+        if sys.argv[i] == "-s" or sys.argv[i] == "--suite":
+            params["suite"] = sys.argv[i + 1]
+        if sys.argv[i] == "-c" or sys.argv[i] == "--conf":
+            params["conf"] = sys.argv[i + 1]
+        if sys.argv[i] == "-o" or sys.argv[i] == "--output":
+            params["output"] = sys.argv[i + 1]
+        if sys.argv[i] == "-u" or sys.argv[i] == "--upload":
+            params["upload"] = sys.argv[i + 1]
+        i = i + 2
+    if not("input" in params and "suite" in params and "conf" in params):
         print "Lack some required parameters."
-        exit (1)
+        exit(1)
 
     result = process(params["input"], params["suite"])
     print "Results:"
     for elem in result:
         print elem
 
-    if params.has_key("output"):
-        writeResult(params["output"],result)
+    if "output" in params:
+        writeResult(params["output"], result)
 
-    if params.has_key("upload") and params["upload"].lower()=="yes":
+    if "upload" in params and params["upload"].lower() == "yes":
         uploadResult(params["conf"], params["suite"], result)
 
-if __name__=="__main__":
+if __name__ == "__main__":
     main()
index c985173..d9f8603 100755 (executable)
 
 import subprocess as subp
 
+
 def exec_shell(cmd):
-    out,err = subp.Popen(cmd, stdout=subp.PIPE, shell=True).communicate()
+    out, err = subp.Popen(cmd, stdout=subp.PIPE, shell=True).communicate()
     return out.strip()
 
+
 def get_onetime_data(dir_name):
     cmd = "grep -in 'remote client nodes' %s/index.html|awk '{print $5}'|awk -F '<' '{print $1}'" % dir_name
     client_node_num = int(exec_shell(cmd))
     cmd = "grep -n 'Number of clients' %s/index.html|awk '{print $5}'|awk -F '<' '{print $1}'" % dir_name
     each_client_num = int(exec_shell(cmd))
-    total_client = (client_node_num+1) * each_client_num
+    total_client = (client_node_num + 1) * each_client_num
 
     cmd = 'grep -n "throughput" %s/stat_client*.html |awk -F "<B>" \'{if (FNR%%2==0 && FNR%%4!=0) {printf "%%s\\n", $3}}\'|awk \'BEGIN{sum=0;}{sum=sum+$1;}END{print sum}\'' % dir_name
     throughput = int(exec_shell(cmd))
@@ -39,11 +41,15 @@ class RubbosCollector(object):
         pass
 
     def collect_data(self, data_home):
-        cmd =  'ls -l %s |grep ^d|awk \'{print $9}\'' % data_home
+        cmd = 'ls -l %s |grep ^d|awk \'{print $9}\'' % data_home
         result = []
         for subdir in exec_shell(cmd).split('\n'):
-            total_client, throughput, request, error_request = get_onetime_data(data_home+'/'+subdir)
-            result.append({'client':total_client, 'throughput':throughput, 'request':request, 'error_request':error_request})
-        result.sort(key=lambda x:x['client'])
-
-        return result;
+            total_client, throughput, request, error_request = get_onetime_data(
+                data_home + '/' + subdir)
+            result.append({'client': total_client,
+                           'throughput': throughput,
+                           'request': request,
+                           'error_request': error_request})
+        result.sort(key=lambda x: x['client'])
+
+        return result
index 07862fe..a468656 100755 (executable)
@@ -29,7 +29,6 @@ class Uploader(object):
         self.result['version'] = dashboard_conf['version']
         self.target = dashboard_conf['target']
 
-
     def upload_result(self, case_name, raw_data):
         if self.target == '':
             print('No target was set, so no data will be posted.')
@@ -43,7 +42,9 @@ class Uploader(object):
                                 data=json.dumps(self.result),
                                 headers=self.headers,
                                 timeout=self.timeout)
-            print('Test result posting finished with status code %d.' % res.status_code)
+            print(
+                'Test result posting finished with status code %d.' %
+                res.status_code)
         except Exception as err:
             print ('Failed to record result data: %s', err)
 
@@ -55,10 +56,9 @@ def _test():
         print ("no argumens input!!")
         exit(1)
 
-    with open(sys.argv[1],'r') as stream:
+    with open(sys.argv[1], 'r') as stream:
         data = json.load(stream)
         Uploader().upload_result(data)
 
 if __name__ == "__main__":
     _test()
-
index 71830bf..cb907e5 100644 (file)
@@ -1,4 +1,4 @@
-#Copyright 2013: Mirantis Inc.
+# Copyright 2013: Mirantis Inc.
 # All Rights Reserved.
 #
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
index 24de893..c4a7824 100755 (executable)
@@ -34,6 +34,7 @@ TEMPLATE_EXTENSION = None
 # Initialization and Input 'heat_templates/'validation
 # ------------------------------------------------------
 
+
 def init(api=False):
     global BASE_DIR
     # BASE_DIR = os.getcwd()
@@ -46,6 +47,7 @@ def init(api=False):
     log_init()
     general_vars_init(api)
 
+
 def conf_file_init(api=False):
     global CONF_FILE
     if api:
@@ -66,24 +68,24 @@ def general_vars_init(api=False):
 
     # Check Section in Configuration File
     InputValidation.validate_configuration_file_section(
-            files.GENERAL,
-            "Section " + files.GENERAL +
-            "is not present in configuration file")
+        files.GENERAL,
+        "Section " + files.GENERAL +
+        "is not present in configuration file")
 
     InputValidation.validate_configuration_file_section(
-            files.OPENSTACK,
-            "Section " + files.OPENSTACK +
-            "is not present in configuration file")
+        files.OPENSTACK,
+        "Section " + files.OPENSTACK +
+        "is not present in configuration file")
 
     TEMPLATE_DIR = '/tmp/heat_templates/'
 
     if not api:
         # Validate template name
         InputValidation.validate_configuration_file_parameter(
-                files.GENERAL,
-                files.TEMPLATE_NAME,
-                "Parameter " + files.TEMPLATE_NAME +
-                "is not present in configuration file")
+            files.GENERAL,
+            files.TEMPLATE_NAME,
+            "Parameter " + files.TEMPLATE_NAME +
+            "is not present in configuration file")
         TEMPLATE_NAME = CONF_FILE.get_variable(files.GENERAL,
                                                files.TEMPLATE_NAME)
         InputValidation.validate_file_exist(
@@ -112,6 +114,7 @@ def log_init():
 # Configuration file access
 # ------------------------------------------------------
 
+
 class ConfigurationFile:
     """
     Used to extract data from the configuration file
@@ -188,6 +191,7 @@ class ConfigurationFile:
 # Manage files
 # ------------------------------------------------------
 
+
 def get_heat_template_params():
     """
     Returns the list of deployment parameters from the configuration file
@@ -203,6 +207,7 @@ def get_heat_template_params():
             files.DEPLOYMENT_PARAMETERS, param)
     return testcase_parameters
 
+
 def get_testcase_params():
     """
     Returns the list of testcase parameters from the configuration file
@@ -216,6 +221,7 @@ def get_testcase_params():
             files.TESTCASE_PARAMETERS, param)
     return testcase_parameters
 
+
 def get_file_first_line(file_name):
     """
     Returns the first line of a file
@@ -254,6 +260,8 @@ def replace_in_file(file, text_to_search, text_to_replace):
 # ------------------------------------------------------
 # Shell interaction
 # ------------------------------------------------------
+
+
 def run_command(command):
     LOG.info("Running command: {}".format(command))
     return os.system(command)
@@ -262,15 +270,19 @@ def run_command(command):
 # Expose variables to other modules
 # ------------------------------------------------------
 
+
 def get_base_dir():
     return BASE_DIR
 
+
 def get_template_dir():
     return TEMPLATE_DIR
 
 # ------------------------------------------------------
 # Configuration Variables from Config File
 # ------------------------------------------------------
+
+
 def get_deployment_configuration_variables_from_conf_file():
     variables = dict()
     types = dict()
@@ -289,13 +301,17 @@ def get_deployment_configuration_variables_from_conf_file():
 # ------------------------------------------------------
 # benchmarks from Config File
 # ------------------------------------------------------
+
+
 def get_benchmarks_from_conf_file():
     requested_benchmarks = list()
-    benchmarks = CONF_FILE.get_variable(files.GENERAL, files.BENCHMARKS).split(', ')
+    benchmarks = CONF_FILE.get_variable(
+        files.GENERAL, files.BENCHMARKS).split(', ')
     for benchmark in benchmarks:
         requested_benchmarks.append(benchmark)
     return requested_benchmarks
 
+
 class InputValidation(object):
 
     @staticmethod
index 2856650..f148f10 100755 (executable)
@@ -12,6 +12,7 @@
 # ------------------------------------------------------
 GENERAL = 'General'
 
+
 def get_sections():
     return [
         GENERAL,
index 5902e8c..f5a9b88 100755 (executable)
@@ -13,6 +13,7 @@ from heatclient.common import template_utils
 
 import heat.common as common
 
+
 class HeatManager:
 
     def __init__(self, credentials):
@@ -26,14 +27,14 @@ class HeatManager:
 
     def heat_init(self):
         keystone = keystone_client.Client(username=self.user,
-                                         password=self.password,
-                                         tenant_name=self.project_id,
-                                         auth_url=self.auth_uri)
+                                          password=self.password,
+                                          tenant_name=self.project_id,
+                                          auth_url=self.auth_uri)
         auth_token = keystone.auth_token
         self.heat_url = keystone.service_catalog.url_for(
             service_type='orchestration')
         self.heat = heat_client.Client('1', endpoint=self.heat_url,
-                                      token=auth_token)
+                                       token=auth_token)
 
     def stacks_list(self, name=None):
         for stack in self.heat.stacks.list():
@@ -44,7 +45,8 @@ class HeatManager:
     def stack_generate(self, template_file, stack_name, parameters):
         self.heat_init()
         self.stacks_list()
-        tpl_files, template = template_utils.get_template_contents(template_file)
+        tpl_files, template = template_utils.get_template_contents(
+            template_file)
 
         fields = {
             'template': template,
index f05831d..f71e916 100755 (executable)
@@ -15,6 +15,7 @@ import shutil
 import common
 import consts.parameters as parameters
 
+
 class TreeNode:
 
     def __init__(self):
@@ -80,6 +81,7 @@ class TreeNode:
 
 template_name = parameters.TEST_TEMPLATE_NAME
 
+
 def generates_templates(base_heat_template, deployment_configuration):
     # parameters loaded from file
     template_dir = common.get_template_dir()
@@ -148,7 +150,8 @@ def get_all_heat_templates(template_dir, template_extension):
     template_files = list()
     for dirname, dirnames, filenames in os.walk(template_dir):
         for filename in filenames:
-            if template_extension in filename and filename.endswith(template_extension) and template_name in filename:
+            if template_extension in filename and filename.endswith(
+                    template_extension) and template_name in filename:
                 template_files.append(filename)
     template_files.sort()
     return template_files
index d4e0a23..83c905a 100755 (executable)
@@ -16,6 +16,7 @@ sys.path.append("..")
 import template
 import common
 
+
 def reset_common():
     common.LOG = None
     common.CONF_FILE = None
@@ -26,7 +27,9 @@ def reset_common():
     common.TEMPLATE_NAME = None
     common.TEMPLATE_EXTENSION = None
 
+
 class TestGeneratesTemplate(unittest.TestCase):
+
     def setUp(self):
         self.deployment_configuration = {
             'flavor': ['medium']
@@ -42,7 +45,8 @@ class TestGeneratesTemplate(unittest.TestCase):
     def test_generates_template_for_success(self, mock_template_dir,
                                             mock_log):
         tmp_generated_templates_dir = '/data/generated_templates/'
-        generated_templates_dir = "{}{}".format(os.getcwd(), tmp_generated_templates_dir)
+        generated_templates_dir = "{}{}".format(
+            os.getcwd(), tmp_generated_templates_dir)
         mock_template_dir.return_value = generated_templates_dir
         tmp_test_templates = '/data/test_templates/'
         test_templates = "{}{}".format(os.getcwd(), tmp_test_templates)
@@ -69,7 +73,8 @@ class TestGeneratesTemplate(unittest.TestCase):
     @mock.patch('common.get_template_dir')
     def test_get_all_heat_templates_for_success(self, template_dir):
         tmp_generated_templates = '/data/generated_templates/'
-        generated_templates = "{}{}".format(os.getcwd(), tmp_generated_templates)
+        generated_templates = "{}{}".format(
+            os.getcwd(), tmp_generated_templates)
         template_dir.return_value = generated_templates
         extension = '.yaml'
         expected = ['test_template_1.yaml']