Sysmetrics implementation update 23/1823/3
authorMartin Klozik <martinx.klozik@intel.com>
Sun, 6 Sep 2015 23:44:50 +0000 (00:44 +0100)
committerMaryam Tahhan <maryam.tahhan@intel.com>
Tue, 29 Sep 2015 10:29:55 +0000 (10:29 +0000)
New sysmetrics implementation is based on pidstat command line tool
from sysstat package. Old non-functional implementation was removed.
Reporting was refactored to generate report after each TC from values
already available in memory.
Following files were affected:
modified:   conf/01_testcases.conf
modified:   conf/02_vswitch.conf
modified:   conf/05_collector.conf
deleted:    core/collector_controller.py
modified:   core/component_factory.py
modified:   docs/NEWS.rst
modified:   packages.txt
modified:   requirements.txt
modified:   testcases/testcase.py
modified:   tools/collectors/collector/collector.py
modified:   tools/collectors/sysmetrics/__init__.py
deleted:    tools/collectors/sysmetrics/linuxmetrics.py
new file:   tools/collectors/sysmetrics/pidstat.py
modified:   tools/report/report.jinja
modified:   tools/report/report.py
modified:   tools/systeminfo.py
modified:   vsperf

JIRA: VSPERF-67
Change-Id: I25a79f2afef405b9ac46ae85c18044af167a62a4
Signed-off-by: Martin Klozik <martinx.klozik@intel.com>
Reviewed-by: Billy O Mahony <billy.o.mahony@intel.com>
Reviewed-by: Maryam Tahhan <maryam.tahhan@intel.com>
Reviewed-by: Al Morton <acmorton@att.com>
Reviewed-by: Gurpreet Singh <gurpreet.singh@spirent.com>
Reviewed-by: Tv Rao <tv.rao@freescale.com>
17 files changed:
conf/01_testcases.conf
conf/02_vswitch.conf
conf/05_collector.conf
core/collector_controller.py [deleted file]
core/component_factory.py
docs/to-be-reorganized/NEWS.rst
packages.txt
requirements.txt
testcases/testcase.py
tools/collectors/collector/collector.py
tools/collectors/sysmetrics/__init__.py
tools/collectors/sysmetrics/linuxmetrics.py [deleted file]
tools/collectors/sysmetrics/pidstat.py [new file with mode: 0644]
tools/report/report.jinja
tools/report/report.py
tools/systeminfo.py
vsperf

index cb4f948..815e3a7 100755 (executable)
@@ -20,8 +20,6 @@
 # "Name": "phy2phy_burst",         # A human-readable string identifying the
 #                                  # test.
 # "Traffic Type": "rfc2544",       # One of the supported traffic types.
-# "Collector": "cpu, memory",      # Comma-separated list of Collectors to
-#                                  # be activated during this test.
 # "Deployment": "p2p",             # One of the supported deployment scenarios.
 # "Description": "Lorem ipsum..."  # Optional. A human-readable string
 #                                  # describing the test.
@@ -69,7 +67,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_tput",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "biDirectional": "True",
         "Description": "LTD.Throughput.RFC2544.PacketLossRatio",
@@ -77,7 +74,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "back2back",
         "Traffic Type": "back2back",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "biDirectional": "True",
         "Description": "LTD.Throughput.RFC2544.BackToBackFrames",
@@ -85,7 +81,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_tput_mod_vlan",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "Frame Modification": "vlan",
         "biDirectional": "False",
@@ -94,7 +89,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_cont",
         "Traffic Type": "continuous",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "Description": "RFC2544 Phy2Phy Continuous Stream",
         "biDirectional": "True",
@@ -102,7 +96,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_scalability",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "biDirectional": "True",
         "Description": "LTD.Scalability.RFC2544.0PacketLoss",
@@ -111,7 +104,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "pvp_tput",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "pvp",
         "Description": "LTD.Throughput.RFC2544.PacketLossRatio",
         "biDirectional": "True",
@@ -119,7 +111,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "pvp_back2back",
         "Traffic Type": "back2back",
-        "Collector": "cpu",
         "Deployment": "pvp",
         "Description": "LTD.Throughput.RFC2544.BackToBackFrames",
         "biDirectional": "True",
@@ -127,7 +118,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_cpu_load",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "biDirectional": "True",
         "Description": "LTD.CPU.RFC2544.0PacketLoss",
@@ -141,7 +131,6 @@ PERFORMANCE_TESTS = [
     {
         "Name": "phy2phy_mem_load",
         "Traffic Type": "rfc2544",
-        "Collector": "cpu",
         "Deployment": "p2p",
         "biDirectional": "True",
         "Description": "LTD.Memory.RFC2544.0PacketLoss",
index e69511b..73c42a9 100644 (file)
@@ -45,8 +45,8 @@ VSWITCH_VANILLA_KERNEL_MODULES = ['openvswitch']
 # directory where hugepages will be mounted on system init
 HUGEPAGE_DIR = '/dev/hugepages'
 
-# Sets OVS PMD core mask to 2 for CPU core 1 affinitization
-VSWITCH_PMD_CPU_MASK = '2'
+# Sets OVS PMDs core mask to 0x30 for affinitization to 5th and 6th CPU core
+VSWITCH_PMD_CPU_MASK = '0x30'
 VSWITCH_AFFINITIZATION_ON = 1
 
 VSWITCH_FLOW_TIMEOUT = '30000'
index 4dcd816..bda0ac8 100644 (file)
 # Collector configuration
 # ############################
 
-# ############################
-# Sysmetrics configuration
-# ############################
-
-COLLECTOR = 'LinuxMetrics'
+COLLECTOR = 'Pidstat'
 COLLECTOR_DIR = os.path.join(ROOT_DIR, 'tools/collectors')
 
-# the number of seconds between samples when calculating CPU percentage
-SYSMETRICS_LINUX_METRICS_CPU_SAMPLES_INTERVAL = 5
+# processes to be monitored by pidstat
+PIDSTAT_MONITOR = ['ovs-vswitchd', 'ovsdb-server', 'qemu-system-x86_64']
+
+# options which will be passed to pidstat
+PIDSTAT_OPTIONS = '-dur'
 
-# log file for sysmetrics
-LOG_FILE_SYS_METRICS = 'system-metrics.log'
+# sampling interval used by pidstat to collect statistics
+PIDSTAT_SAMPLE_INTERVAL = 1
 
+# prefix of pidstat's log file; separate log file is created
+# for each testcase in the directory with results
+LOG_FILE_PIDSTAT = 'pidstat'
diff --git a/core/collector_controller.py b/core/collector_controller.py
deleted file mode 100644 (file)
index 10c9bce..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""CollectorController class
-"""
-from core.results.results import IResults
-
-class CollectorController(IResults):
-    """Class which defines a collector controller object.
-
-    Used to set-up and control a collector provider.
-    """
-
-    def __init__(self, collector_class):
-        """Sets up the prerequisites for the Collector.
-
-        :param collector_class: the Collector class to be used.
-        """
-        self._collector = collector_class()
-        self._results = []
-
-    def log_mem_stats(self):
-        """Log memory stats.
-        """
-        self._results.append(self._collector.log_mem_stats())
-
-    def log_cpu_stats(self):
-        """Log CPU stats.
-        """
-        self._results.append(self._collector.log_cpu_stats())
-
-    def get_results(self):
-        """Return collected CPU and memory stats.
-
-        Implements IResults i/f, see IResults for details.
-        """
-        return self._results
-
-    def print_results(self):
-        """Prints collected CPU and memory stats.
-
-        Implements IResults i/f, see IResults for details.
-        """
-        print(self._results)
index f92de20..e8bb4de 100644 (file)
@@ -20,7 +20,6 @@ from core.vswitch_controller_p2p import VswitchControllerP2P
 from core.vswitch_controller_pvp import VswitchControllerPVP
 from core.vnf_controller_p2p import VnfControllerP2P
 from core.vnf_controller_pvp import VnfControllerPVP
-from core.collector_controller import CollectorController
 from tools.load_gen.stress.stress import Stress
 from tools.load_gen.stress_ng.stress_ng import StressNg
 from tools.load_gen.dummy.dummy import DummyLoadGen
@@ -86,20 +85,15 @@ def create_vnf(deployment_scenario, vnf_class):
     elif deployment_scenario.find("pvp") >= 0:
         return VnfControllerPVP(vnf_class)
 
-def create_collector(collector, collector_class):
-    """Return a new CollectorController of the given class
+def create_collector(collector_class, result_dir, test_name):
+    """Return a new Collector of the given class
 
-    Supported collector type strings:
-    'cpu'
-    'memory':
-
-    :param collector: Collector type string
     :param collector_class: The collector class to be used.
+    :param result_dir: Directory with test results
+    :param test_name: Test to be run
     :return: A new CollectorController.
     """
-    collector = collector.lower()
-    if "cpu" in collector or "memory" in collector:
-        return CollectorController(collector_class)
+    return collector_class(result_dir, test_name)
 
 def create_loadgen(loadgen_type, loadgen_cfg):
     """Return a new ILoadGenerator for the loadgen type.
index 3f01e37..93be5da 100644 (file)
@@ -1,3 +1,9 @@
+September 2015
+==============
+New
+---
+- Implementation of system statistics based upon pidstat command line tool.
+
 August 2015
 ===========
 New
index fb641fd..7d68021 100644 (file)
@@ -6,3 +6,4 @@ zlib-devel
 glib2-devel
 pixman-devel
 socat
+sysstat
index 3241072..934340d 100644 (file)
@@ -1,5 +1,4 @@
 pexpect==3.3
-linux-metrics==0.1.4
 tox==1.8.1
 jinja2==2.7.3
 xmlrunner==1.7.7
index 3ea97c3..6d37ce5 100644 (file)
@@ -22,6 +22,7 @@ from collections import OrderedDict
 from core.results.results_constants import ResultsConstants
 import core.component_factory as component_factory
 from core.loader import Loader
+from tools.report import report
 
 class TestCase(object):
     """TestCase base class
@@ -41,7 +42,6 @@ class TestCase(object):
         self.desc = cfg.get('Description', 'No description given.')
         self._traffic_type = cfg['Traffic Type']
         self.deployment = cfg['Deployment']
-        self._collector = cfg['Collector']
         self._bidir = cfg['biDirectional']
         self._frame_mod = cfg.get('Frame Modification', None)
 
@@ -77,17 +77,16 @@ class TestCase(object):
             self.deployment,
             loader.get_vswitch_class(),
             self._bidir)
-        collector_ctl = component_factory.create_collector(
-            self._collector,
-            loader.get_collector_class())
+        collector = component_factory.create_collector(
+            loader.get_collector_class(),
+            self._results_dir, self.name)
         loadgen = component_factory.create_loadgen(
             self._loadgen,
             self._load_cfg)
 
         self._logger.debug("Setup:")
-        collector_ctl.log_cpu_stats()
         with vswitch_ctl, loadgen:
-            with vnf_ctl:
+            with vnf_ctl, collector:
                 traffic = {'traffic_type': self._traffic_type,
                            'bidir': self._bidir,
                            'multistream': self._multistream}
@@ -177,13 +176,15 @@ class TestCase(object):
         traffic_ctl.print_results()
 
         self._logger.debug("Collector Results:")
-        self._logger.debug(collector_ctl.get_results())
+        collector.print_results()
 
-        output_file = "result_" + self.name + "_" + self.deployment +".csv"
+        output_file = os.path.join(self._results_dir, "result_" + self.name +
+                                   "_" + self.deployment + ".csv")
 
-        TestCase._write_result_to_file(
-            self._append_results(traffic_ctl.get_results()),
-            os.path.join(self._results_dir, output_file))
+        tc_results = self._append_results(traffic_ctl.get_results())
+        TestCase._write_result_to_file(tc_results, output_file)
+
+        report.generate(output_file, tc_results, collector.get_results())
 
     def _append_results(self, results):
         """
index 27a0720..998c1f6 100644 (file)
 """Abstract "system metrics logger" model.
 """
 
-CMD_PREFIX = 'metricscmd : '
-
 class ICollector(object):
     """This is an abstract class for system metrics loggers.
     """
 
-    def log_mem_stats(self):
-        """Log memory statistics.
+    def start(self):
+        """Starts data collection. This method must be non-blocking.
+        It means, that collector must be executed as a background process.
 
         Where implemented, this function should raise an exception on
         failure.
         """
         raise NotImplementedError('Please call an implementation.')
 
-    def log_cpu_stats(self):
-        """Log cpu statistics.
+    def stop(self):
+        """Stops data collection.
 
         Where implemented, this function should raise an exception on
         failure.
         """
         raise NotImplementedError('Please call an implementation.')
+
+    def get_results(self):
+        """Returns collected results.
+
+        Where implemented, this function should raise an exception on
+        failure.
+        """
+        raise NotImplementedError('Please call an implementation.')
+
+    def print_results(self):
+        """Logs collected results.
+
+        Where implemented, this function should raise an exception on
+        failure.
+        """
+        raise NotImplementedError('Please call an implementation.')
+
+    def __enter__(self):
+        """Starts up collection of statistics
+        """
+        self.start()
+
+    def __exit__(self, type_, value, traceback):
+        """Stops collection of statistics
+        """
+        self.stop()
index 9ad1bf2..e3e07fd 100755 (executable)
@@ -11,8 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-"""Implementation of linux-metrics system metrics logger.
-"""
-
-from tools.collectors.sysmetrics.linuxmetrics import *
diff --git a/tools/collectors/sysmetrics/linuxmetrics.py b/tools/collectors/sysmetrics/linuxmetrics.py
deleted file mode 100644 (file)
index fdf3069..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2015 Intel Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""linux-metrics system statistics model.
-
-Provides linux-metrics system statistics generic "helper" functions.
-
-This requires the following setting in your config:
-
-* SYSMETRICS_LINUX_METRICS_CPU_SAMPLES_INTERVAL
-    Number of seconds in between samples to take for CPU percentages
-
-If this doesn't exist, the application will raise an exception
-(EAFP).
-"""
-
-
-import logging
-import os
-from conf import settings
-from tools.collectors.collector import collector
-from linux_metrics import cpu_stat, mem_stat
-
-_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
-
-class LinuxMetrics(collector.ICollector):
-    """A logger based on the linux-metrics module.
-
-    Currently it supports the logging of memory and CPU statistics
-    """
-    def __init__(self):
-        self._logger = logging.getLogger(__name__)
-        self._num_samples = settings.getValue(
-            'SYSMETRICS_LINUX_METRICS_CPU_SAMPLES_INTERVAL')
-        self._mem_stats = []
-        self._cpu_stats = []
-
-    def log_mem_stats(self):
-        """See ICollector for descripion
-        """
-        self._mem_stats = mem_stat.mem_stats()
-        # pylint: disable=unbalanced-tuple-unpacking
-        mem_active, mem_total, mem_cached, mem_free, swap_total, swap_free = \
-            self._mem_stats
-        self._logger.info('%s mem_active: %s, mem_total: %s, mem_cached: %s, '
-                          'mem_free: %s, swap_total: %s, swap_free: %s',
-                          collector.CMD_PREFIX,
-                          mem_active, mem_total, mem_cached, mem_free,
-                          swap_total, swap_free)
-        return self._mem_stats
-
-    def log_cpu_stats(self):
-        """See ICollector for descripion
-        """
-        self._cpu_stats = cpu_stat.cpu_percents(self._num_samples)
-        self._logger.info('%s user: %.2f%%, nice: %.2f%%, system: %.2f%%, '
-                          'idle: %.2f%%, iowait: %.2f%%, irq: %.2f%%, '
-                          'softirq: %.2f%%',
-                          collector.CMD_PREFIX,
-                          self._cpu_stats['user'],
-                          self._cpu_stats['nice'],
-                          self._cpu_stats['system'],
-                          self._cpu_stats['idle'],
-                          self._cpu_stats['iowait'],
-                          self._cpu_stats['irq'],
-                          self._cpu_stats['softirq'])
-        return self._cpu_stats
-
diff --git a/tools/collectors/sysmetrics/pidstat.py b/tools/collectors/sysmetrics/pidstat.py
new file mode 100644 (file)
index 0000000..608a0d6
--- /dev/null
@@ -0,0 +1,140 @@
+# Copyright 2015 Intel Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""module for statistics collection by pidstat
+
+Provides system statistics collected between calls of start() and stop()
+by command line tool pidstat (part of sysstat package)
+
+This requires the following setting in your config:
+
+* PIDSTAT_MONITOR = ['ovs-vswitchd', 'ovsdb-server', 'kvm']
+    processes to be monitored by pidstat
+
+* PIDSTAT_OPTIONS = '-dur'
+    options which will be passed to pidstat, i.e. what
+    statistics should be collected by pidstat
+
+* LOG_FILE_PIDSTAT = 'pidstat.log'
+    log file for pidstat; it defines suffix, which will be added
+    to testcase name. Pidstat detailed statistics will be stored separately
+    for every testcase.
+
+If this doesn't exist, the application will raise an exception
+(EAFP).
+"""
+
+import os
+import logging
+import subprocess
+import time
+from collections import OrderedDict
+from tools import tasks
+from tools import systeminfo
+from conf import settings
+from tools.collectors.collector import collector
+
+_ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
+
+class Pidstat(collector.ICollector):
+    """A logger of system statistics based on pidstat
+
+    It collects statistics based on configuration
+    """
+    _logger = logging.getLogger(__name__)
+
+    def __init__(self, results_dir, test_name):
+        """
+        Initialize collection of statistics
+        """
+        self._log = os.path.join(results_dir,
+                                 settings.getValue('LOG_FILE_PIDSTAT') +
+                                 '_' + test_name + '.log')
+        self._results = OrderedDict()
+        self._pid = 0
+
+    def start(self):
+        """
+        Starts collection of statistics by pidstat and stores them
+        into the file in directory with test results
+        """
+        monitor = settings.getValue('PIDSTAT_MONITOR')
+        self._logger.info('Statistics are requested for: ' + ', '.join(monitor))
+        pids = systeminfo.get_pids(monitor)
+        if pids:
+            with open(self._log, 'w') as logfile:
+                cmd = ['sudo', 'pidstat', settings.getValue('PIDSTAT_OPTIONS'),
+                       '-p', ','.join(pids),
+                       str(settings.getValue('PIDSTAT_SAMPLE_INTERVAL'))]
+                self._logger.debug('%s', ' '.join(cmd))
+                self._pid = subprocess.Popen(cmd, stdout=logfile, bufsize=0).pid
+
+    def stop(self):
+        """
+        Stops collection of statistics by pidstat and stores statistic summary
+        for each monitored process into self._results dictionary
+        """
+        if self._pid:
+            self._pid = 0
+            # in python3.4 it's not possible to send signal through pid of sudo
+            # process, so all pidstat processes are interrupted instead
+            # as a workaround
+            tasks.run_task(['sudo', 'pkill', '--signal', '2', 'pidstat'],
+                           self._logger)
+
+        self._logger.info(
+            'Pidstat log available at %s', self._log)
+
+        # let's give pidstat some time to write down average summary
+        time.sleep(2)
+
+        # parse average values from log file and store them to _results dict
+        self._results = OrderedDict()
+        logfile = open(self._log, 'r')
+        with logfile:
+            line = logfile.readline()
+            while line:
+                line = line.strip()
+                # process only lines with summary
+                if line[0:7] == 'Average':
+                    if line[-7:] == 'Command':
+                        # store header fields if detected
+                        tmp_header = line[8:].split()
+                    else:
+                        # combine stored header fields with actual values
+                        tmp_res = OrderedDict(zip(tmp_header,
+                                                  line[8:].split()))
+                        # use process's name and its pid as unique key
+                        key = tmp_res.pop('Command') + '_' + tmp_res['PID']
+                        # store values for given command into results dict
+                        if key in self._results:
+                            self._results[key].update(tmp_res)
+                        else:
+                            self._results[key] = tmp_res
+
+                line = logfile.readline()
+
+    def get_results(self):
+        """Returns collected statistics.
+        """
+        return self._results
+
+    def print_results(self):
+        """Logs collected statistics.
+        """
+        for process in self._results:
+            logging.info("Process: " + '_'.join(process.split('_')[:-1]))
+            for(key, value) in self._results[process].items():
+                logging.info("         Statistic: " + str(key) +
+                             ", Value: " + str(value))
index 63d30fc..491dbe9 100644 (file)
@@ -88,12 +88,27 @@ A detailed summary of the main results for each test is outlined below.
 
 The following are the metrics obtained during this test:
 
-|              Metric            |             Result             |
+|             Metric             |             Result             |
 | ------------------------------ | ------------------------------ |
 {%- for item, value in test.result.items() %}
 | {{ "%-30s | %30s |"|format(item,value)}}
 {%- endfor %}
 
+##### Statistics collected
+
+The following system statistics were collected during testcase execution:
+{% for process in test.stats %}
+| --------------------------------------------------------------- |
+| Process: {{ "%-54s |"|format('_'.join(process.split('_')[:-1])) }}
+| ------------------------------ | ------------------------------ |
+|           Statistic            |             Value              |
+| ------------------------------ | ------------------------------ |
+{%- for item, value in test.stats[process].items() %}
+| {{ "%-30s | %30s |"|format(item,value)}}
+{%- endfor %}
+
+{% endfor %}
+
 ##### Anomalies
 
 No anomalies were detected during the course of this test.
index 8d21329..806aecb 100644 (file)
@@ -21,10 +21,8 @@ Generate reports in format defined by X.
 import sys
 import os
 import jinja2
-import csv
 import logging
 
-from collections import OrderedDict
 from core.results.results_constants import ResultsConstants
 from conf import settings
 from tools import systeminfo
@@ -60,27 +58,7 @@ def _get_env():
     return env
 
 
-def _get_results(results_file):
-    """Get results from tests.
-
-    Get test results from a CSV file and return it as a list
-    of dictionaries for each row of data.
-
-    :param results_file: Path of the CSV results file
-
-    :returns: List of test results
-    """
-    with open(results_file, 'r') as csvfile:
-        reader = csv.reader(csvfile, delimiter=',')
-        result = []
-        res_head = next(reader)
-        for res_row in reader:
-            result.append(OrderedDict(zip(list(res_head), list(res_row))))
-
-    return result
-
-
-def generate(testcases, input_file):
+def generate(input_file, tc_results, tc_stats):
     """Generate actual report.
 
     Generate a Markdown-formatted file using results of tests and some
@@ -98,9 +76,9 @@ def generate(testcases, input_file):
 
     tests = []
     try:
-        for result in _get_results(input_file):
+        for result in tc_results:
             test_config = {}
-            for tc_conf in testcases:
+            for tc_conf in settings.getValue('PERFORMANCE_TESTS'):
                 if tc_conf['Name'] == result[ResultsConstants.ID]:
                     test_config = tc_conf
                     break
@@ -119,6 +97,7 @@ def generate(testcases, input_file):
                 'conf': test_config,
                 'result': result,
                 'env': _get_env(),
+                'stats': tc_stats
             })
 
         template_vars = {
@@ -131,12 +110,13 @@ def generate(testcases, input_file):
             logging.info('Test report written to "%s"', output_file)
 
     except KeyError:
-        logging.info("Report: Ignoring file (Wrongly defined columns): %s", (input_file))
+        logging.info("Report: Ignoring file (Wrongly defined columns): %s",
+                     (input_file))
         raise
     return output_file
 
 
 if __name__ == '__main__':
     settings.load_from_dir('conf')
-    OUT = generate(sys.argv[1])
+    OUT = generate(sys.argv[1], '', '')
     print('Test report written to "%s"...' % OUT)
index 19c5d16..287a74d 100644 (file)
@@ -137,3 +137,25 @@ def get_memory_bytes():
 
     return int(mem)
 
+def get_pids(proc_names_list):
+    """ Get pid(s) of process(es) with given name(s)
+
+    :returns: list with pid(s) of given processes or None if processes
+        with given names are not running
+    """
+
+    try:
+        pids = subprocess.check_output(['pidof'] + proc_names_list)
+    except:
+        # such process isn't running
+        return None
+
+    return list(map(str, map(int, pids.split())))
+
+def get_pid(proc_name_str):
+    """ Get pid(s) of process with given name
+
+    :returns: list with pid(s) of given process or None if process
+        with given name is not running
+    """
+    return get_pids([proc_name_str])
diff --git a/vsperf b/vsperf
index 7c9f018..310e1a4 100755 (executable)
--- a/vsperf
+++ b/vsperf
@@ -32,12 +32,8 @@ sys.dont_write_bytecode = True
 from conf import settings
 from core.loader import Loader
 from testcases import TestCase
-from tools.report import report
 from tools import tasks
-from tools.collectors import collector
 from tools.pkt_gen import trafficgen
-from vswitches import vswitch
-from vnfs import vnf
 
 VERBOSITY_LEVELS = {
     'debug': logging.DEBUG,
@@ -173,9 +169,6 @@ def configure_logging(level):
     log_file_traffic_gen = os.path.join(
         settings.getValue('LOG_DIR'),
         settings.getValue('LOG_FILE_TRAFFIC_GEN'))
-    log_file_sys_metrics = os.path.join(
-        settings.getValue('LOG_DIR'),
-        settings.getValue('LOG_FILE_SYS_METRICS'))
 
     logger = logging.getLogger()
     logger.setLevel(logging.DEBUG)
@@ -200,11 +193,6 @@ def configure_logging(level):
         def filter(self, record):
             return record.getMessage().startswith(trafficgen.CMD_PREFIX)
 
-    class SystemMetricsCommandFilter(logging.Filter):
-        """Filter out strings beginning with 'gencmd :'"""
-        def filter(self, record):
-            return record.getMessage().startswith(collector.CMD_PREFIX)
-
     cmd_logger = logging.FileHandler(filename=log_file_host_cmds)
     cmd_logger.setLevel(logging.DEBUG)
     cmd_logger.addFilter(CommandFilter())
@@ -215,11 +203,6 @@ def configure_logging(level):
     gen_logger.addFilter(TrafficGenCommandFilter())
     logger.addHandler(gen_logger)
 
-    metrics_logger = logging.FileHandler(filename=log_file_sys_metrics)
-    metrics_logger.setLevel(logging.DEBUG)
-    metrics_logger.addFilter(SystemMetricsCommandFilter())
-    logger.addHandler(metrics_logger)
-
 
 def apply_filter(tests, tc_filter):
     """Allow a subset of tests to be conveniently selected
@@ -432,13 +415,6 @@ def main():
         files_list = os.listdir(results_path)
         if files_list == []:
             shutil.rmtree(results_path)
-        else:
-            for file in files_list:
-                # generate report from all csv files
-                if file[-3:] == 'csv':
-                    results_csv = os.path.join(results_path, file)
-                    if os.path.isfile(results_csv) and os.access(results_csv, os.R_OK):
-                        report.generate(testcases, results_csv)
 
 if __name__ == "__main__":
     main()