Merge "Refactor remote command execution in vsperf"
yardstick/benchmark/scenarios/networking/vsperf.py
index 4f4ef21..8344b15 100644
@@ -13,6 +13,7 @@
 # limitations under the License.
 """ Vsperf specific scenario definition """
 
+from __future__ import absolute_import
 import logging
 import os
 import subprocess
@@ -111,12 +112,8 @@ class Vsperf(base.Scenario):
                                                             None)
 
     def setup(self):
-        '''scenario setup'''
+        """scenario setup"""
         vsperf = self.context_cfg['host']
-        vsperf_user = vsperf.get('user', 'ubuntu')
-        vsperf_ssh_port = vsperf.get('ssh_port', ssh.DEFAULT_PORT)
-        vsperf_password = vsperf.get('password', 'ubuntu')
-        vsperf_ip = vsperf.get('ip', None)
 
         # add trafficgen interfaces to the external bridge
         if self.tg_port1:
@@ -127,9 +124,9 @@ class Vsperf(base.Scenario):
                             (self.br_ex, self.tg_port2), shell=True)
 
         # copy vsperf conf to VM
-        LOG.info("user:%s, host:%s", vsperf_user, vsperf_ip)
-        self.client = ssh.SSH(vsperf_user, vsperf_ip,
-                              password=vsperf_password, port=vsperf_ssh_port)
+        self.client = ssh.SSH.from_node(vsperf, defaults={
+            "user": "ubuntu", "password": "ubuntu"
+        })
         # traffic generation could last long
         self.client.wait(timeout=1800)
 
@@ -196,37 +193,34 @@ class Vsperf(base.Scenario):
             cmd += "--conf-file ~/vsperf.conf "
         cmd += "--test-params=\"%s\"" % (';'.join(test_params))
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-
-        if status:
-            raise RuntimeError(stderr)
+        self.client.run(cmd)
 
         # get test results
         cmd = "cat /tmp/results*/result.csv"
         LOG.debug("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-
-        if status:
-            raise RuntimeError(stderr)
+        _, stdout, _ = self.client.execute(cmd, raise_on_error=True)
 
         # convert result.csv to JSON format
-        reader = csv.DictReader(stdout.split('\r\n'))
-        result.update(reader.next())
+        reader = csv.DictReader(stdout.split('\r\n'), strict=True)
+        try:
+            result.update(next(reader))
+        except StopIteration:
+            pass
 
         # sla check; go through all defined SLAs and check if values measured
        # by VSPERF are higher than those defined by SLAs
         if 'sla' in self.scenario_cfg and \
            'metrics' in self.scenario_cfg['sla']:
             for metric in self.scenario_cfg['sla']['metrics'].split(','):
-                assert metric in result, \
-                    '%s is not collected by VSPERF' % (metric)
-                assert metric in self.scenario_cfg['sla'], \
-                    '%s is not defined in SLA' % (metric)
+                self.verify_SLA(metric in result,
+                                '%s was not collected by VSPERF' % metric)
+                self.verify_SLA(metric in self.scenario_cfg['sla'],
+                                '%s is not defined in SLA' % metric)
                 vs_res = float(result[metric])
                 sla_res = float(self.scenario_cfg['sla'][metric])
-                assert vs_res >= sla_res, \
-                    'VSPERF_%s(%f) < SLA_%s(%f)' % \
-                    (metric, vs_res, metric, sla_res)
+                self.verify_SLA(vs_res >= sla_res,
+                                'VSPERF_%s(%f) < SLA_%s(%f)'
+                                % (metric, vs_res, metric, sla_res))
 
     def teardown(self):
         """cleanup after the test execution"""