Update sla check for scenarios
author houjingwen <houjingwen@huawei.com>
Mon, 19 Oct 2015 07:37:06 +0000 (15:37 +0800)
committer Hou Jingwen <houjingwen@huawei.com>
Thu, 22 Oct 2015 00:55:25 +0000 (00:55 +0000)
This patch fixes the problem that the SLA check result was incomplete: each scenario's run() now fills a caller-provided result dict in place and, where a scenario checks several thresholds, aggregates all SLA violations into one message before asserting, so results are recorded even when the SLA check fails. A minimal sketch of the new contract follows the list of changed files below.

JIRA: YARDSTICK-172

Change-Id: I10438390baee92caf00dbfcdbdb833823ff8ce31
Signed-off-by: houjingwen <houjingwen@huawei.com>
19 files changed:
tests/unit/benchmark/scenarios/compute/test_cyclictest.py
tests/unit/benchmark/scenarios/networking/test_iperf3.py
tests/unit/benchmark/scenarios/networking/test_netperf.py
tests/unit/benchmark/scenarios/networking/test_ping.py
tests/unit/benchmark/scenarios/networking/test_pktgen.py
tests/unit/benchmark/scenarios/storage/test_fio.py
yardstick/benchmark/runners/arithmetic.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/compute/cyclictest.py
yardstick/benchmark/scenarios/compute/lmbench.py
yardstick/benchmark/scenarios/compute/perf.py
yardstick/benchmark/scenarios/networking/iperf3.py
yardstick/benchmark/scenarios/networking/netperf.py
yardstick/benchmark/scenarios/networking/ping.py
yardstick/benchmark/scenarios/networking/pktgen.py
yardstick/benchmark/scenarios/storage/fio.py
yardstick/plot/plotter.py
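
Taken together, the patch moves every scenario from "return the data" to "fill a caller-provided dict, then assert". A minimal sketch of the new contract (illustrative only, not part of the patch; the class name and sample values are modeled on the cyclictest changes below):

    import json


    class SampleScenario(object):
        """Sketch of the run(args, result) contract this patch introduces."""

        def run(self, args, result):
            # Real scenarios parse stdout returned over SSH; this sample
            # mimics the cyclictest output used in the unit tests below.
            stdout = '{"min": 100, "avg": 500, "max": 1000}'
            result.update(json.loads(stdout))

            if "sla" in args:
                sla_error = ""
                for t, latency in result.items():
                    key = "max_%s_latency" % t
                    if key not in args["sla"]:
                        continue
                    if int(latency) > int(args["sla"][key]):
                        sla_error += "%s latency %d > sla:%s(%d); " % \
                            (t, int(latency), key, int(args["sla"][key]))
                # result is already populated at this point, so the caller
                # keeps the measurements even when the assertion fires.
                assert sla_error == "", sla_error

A runner calls it with an empty dict and can record result whether or not the assert raises.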

index 3791b4a..28dc4d6 100644 (file)
@@ -51,12 +51,14 @@ class CyclictestTestCase(unittest.TestCase):
         args = {
             "options": options,
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
 
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = c.run(args)
+        c.run(args, result)
         expected_result = json.loads(sample_output)
         self.assertEqual(result, expected_result)
 
@@ -80,12 +82,14 @@ class CyclictestTestCase(unittest.TestCase):
             "options": options,
             "sla": sla
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
 
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = c.run(args)
+        c.run(args, result)
         expected_result = json.loads(sample_output)
         self.assertEqual(result, expected_result)
 
@@ -96,11 +100,13 @@ class CyclictestTestCase(unittest.TestCase):
             "options": {},
             "sla": {"max_min_latency": 10}
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, args)
+        self.assertRaises(AssertionError, c.run, args, result)
 
     def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
 
@@ -109,11 +115,13 @@ class CyclictestTestCase(unittest.TestCase):
             "options": {},
             "sla": {"max_avg_latency": 10}
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, args)
+        self.assertRaises(AssertionError, c.run, args, result)
 
     def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
 
@@ -122,11 +130,13 @@ class CyclictestTestCase(unittest.TestCase):
             "options": {},
             "sla": {"max_max_latency": 10}
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
         sample_output = '{"min": 100, "avg": 500, "max": 1000}'
 
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, c.run, args)
+        self.assertRaises(AssertionError, c.run, args, result)
 
     def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
 
@@ -148,10 +158,12 @@ class CyclictestTestCase(unittest.TestCase):
             "options": options,
             "sla": sla
         }
+        result = {}
+
         c.server = mock_ssh.SSH()
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, c.run, args)
+        self.assertRaises(RuntimeError, c.run, args, result)
 
 
 def main():
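
The same three-step pattern repeats in every test file in this patch: create an empty result dict, pass it to run(), then compare the dict in place; assertRaises calls gain the extra positional argument. A condensed sketch (self.ctx and the mock setup are assumptions carried over from the test class above):

    def test_run_fills_result_in_place(self, mock_ssh):
        c = cyclictest.Cyclictest(self.ctx)  # constructor args assumed
        c.server = mock_ssh.SSH()
        sample_output = '{"min": 100, "avg": 500, "max": 1000}'
        mock_ssh.SSH().execute.return_value = (0, sample_output, '')

        result = {}
        c.run({"options": {}}, result)  # run() no longer returns data
        self.assertEqual(result["avg"], 500)

        # SLA violations still raise, with the extra argument passed along.
        args = {"options": {}, "sla": {"max_avg_latency": 10}}
        self.assertRaises(AssertionError, c.run, args, {})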
index 8b0da65..2ec73eb 100644 (file)
@@ -67,11 +67,12 @@ class IperfTestCase(unittest.TestCase):
 
         options = {}
         args = {'options': options}
+        result = {}
 
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         expected_result = json.loads(sample_output)
-        result = p.run(args)
+        p.run(args, result)
         self.assertEqual(result, expected_result)
 
     def test_iperf_successful_sla(self, mock_ssh):
@@ -85,11 +86,12 @@ class IperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'bytes_per_second': 15000000}
         }
+        result = {}
 
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         expected_result = json.loads(sample_output)
-        result = p.run(args)
+        p.run(args, result)
         self.assertEqual(result, expected_result)
 
     def test_iperf_unsuccessful_sla(self, mock_ssh):
@@ -103,10 +105,11 @@ class IperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'bytes_per_second': 25000000}
         }
+        result = {}
 
         sample_output = self._read_sample_output(self.output_name_tcp)
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_iperf_successful_sla_jitter(self, mock_ssh):
 
@@ -119,11 +122,12 @@ class IperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'jitter': 10}
         }
+        result = {}
 
         sample_output = self._read_sample_output(self.output_name_udp)
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         expected_result = json.loads(sample_output)
-        result = p.run(args)
+        p.run(args, result)
         self.assertEqual(result, expected_result)
 
     def test_iperf_unsuccessful_sla_jitter(self, mock_ssh):
@@ -137,10 +141,11 @@ class IperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'jitter': 0.0001}
         }
+        result = {}
 
         sample_output = self._read_sample_output(self.output_name_udp)
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_iperf_unsuccessful_script_error(self, mock_ssh):
 
@@ -150,9 +155,10 @@ class IperfTestCase(unittest.TestCase):
 
         options = {}
         args = {'options': options}
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, p.run, args)
+        self.assertRaises(RuntimeError, p.run, args, result)
 
     def _read_sample_output(self,filename):
         curr_path = os.path.dirname(os.path.abspath(__file__))
index d5c1991..4bb5983 100755 (executable)
@@ -48,11 +48,12 @@ class NetperfTestCase(unittest.TestCase):
 
         options = {}
         args = {'options': options}
+        result = {}
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         expected_result = json.loads(sample_output)
-        result = p.run(args)
+        p.run(args, result)
         self.assertEqual(result, expected_result)
 
     def test_netperf_successful_sla(self, mock_ssh):
@@ -66,11 +67,12 @@ class NetperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'mean_latency': 100}
         }
+        result = {}
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
         expected_result = json.loads(sample_output)
-        result = p.run(args)
+        p.run(args, result)
         self.assertEqual(result, expected_result)
 
     def test_netperf_unsuccessful_sla(self, mock_ssh):
@@ -84,10 +86,11 @@ class NetperfTestCase(unittest.TestCase):
             'options': options,
             'sla': {'mean_latency': 5}
         }
+        result = {}
 
         sample_output = self._read_sample_output()
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_netperf_unsuccessful_script_error(self, mock_ssh):
 
@@ -97,9 +100,10 @@ class NetperfTestCase(unittest.TestCase):
 
         options = {}
         args = {'options': options}
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, p.run, args)
+        self.assertRaises(RuntimeError, p.run, args, result)
 
     def _read_sample_output(self):
         curr_path = os.path.dirname(os.path.abspath(__file__))
index d930adc..b2c5b98 100644 (file)
@@ -35,10 +35,11 @@ class PingTestCase(unittest.TestCase):
             'options': {'packetsize': 200},
             'ipaddr': '172.16.0.138'
             }
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (0, '100', '')
-        result = p.run(args)
-        self.assertEqual(result, float(mock_ssh.SSH().execute.return_value[1]))
+        p.run(args, result)
+        self.assertEqual(result, {'rtt': 100.0})
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_successful_sla(self, mock_ssh):
@@ -50,10 +51,11 @@ class PingTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.138',
             'sla': {'max_rtt': 150}
             }
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (0, '100', '')
-        result = p.run(args)
-        self.assertEqual(result, float(mock_ssh.SSH().execute.return_value[1]))
+        p.run(args, result)
+        self.assertEqual(result, {'rtt': 100.0})
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_unsuccessful_sla(self, mock_ssh):
@@ -65,9 +67,10 @@ class PingTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.138',
             'sla': {'max_rtt': 50}
             }
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (0, '100', '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     @mock.patch('yardstick.benchmark.scenarios.networking.ping.ssh')
     def test_ping_unsuccessful_script_error(self, mock_ssh):
@@ -79,9 +82,10 @@ class PingTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.138',
             'sla': {'max_rtt': 50}
             }
+        result = {}
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, p.run, args)
+        self.assertRaises(RuntimeError, p.run, args, result)
 
 
 def main():
index a20382c..ae4481f 100644 (file)
@@ -113,6 +113,8 @@ class PktgenTestCase(unittest.TestCase):
             'options': {'packetsize': 60, 'number_of_ports': 10},
             'ipaddr': '172.16.0.139'
         }
+        result = {}
+
         p.server = mock_ssh.SSH()
         p.client = mock_ssh.SSH()
 
@@ -124,7 +126,7 @@ class PktgenTestCase(unittest.TestCase):
             "packets_sent": 149776, "flows": 110}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
         expected_result = json.loads(sample_output)
         expected_result["packets_received"] = 149300
         self.assertEqual(result, expected_result)
@@ -137,6 +139,7 @@ class PktgenTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.139',
             'sla': {'max_ppm': 10000}
         }
+        result = {}
         p.server = mock_ssh.SSH()
         p.client = mock_ssh.SSH()
 
@@ -148,7 +151,7 @@ class PktgenTestCase(unittest.TestCase):
             "packets_sent": 149776, "flows": 110}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
         expected_result = json.loads(sample_output)
         expected_result["packets_received"] = 149300
         self.assertEqual(result, expected_result)
@@ -161,6 +164,8 @@ class PktgenTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.139',
             'sla': {'max_ppm': 1000}
         }
+        result = {}
+
         p.server = mock_ssh.SSH()
         p.client = mock_ssh.SSH()
 
@@ -171,7 +176,7 @@ class PktgenTestCase(unittest.TestCase):
         sample_output = '{"packets_per_second": 9753, "errors": 0, \
             "packets_sent": 149776, "flows": 110}'
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_pktgen_unsuccessful_script_error(self, mock_ssh):
 
@@ -181,11 +186,13 @@ class PktgenTestCase(unittest.TestCase):
             'ipaddr': '172.16.0.139',
             'sla': {'max_ppm': 1000}
         }
+        result = {}
+
         p.server = mock_ssh.SSH()
         p.client = mock_ssh.SSH()
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, p.run, args)
+        self.assertRaises(RuntimeError, p.run, args, result)
 
 
 def main():
index 6d38e9c..b47aed9 100644 (file)
@@ -60,12 +60,14 @@ class FioTestCase(unittest.TestCase):
             'ramp_time': 10
         }
         args = {'options': options}
+        result = {}
+
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
 
         expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
             '"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -83,12 +85,14 @@ class FioTestCase(unittest.TestCase):
             'ramp_time': 10
         }
         args = {'options': options}
+        result = {}
+
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['read'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
 
         expected_result = '{"read_bw": 36113, "read_iops": 9028,' \
             '"read_lat": 108.7}'
@@ -105,12 +109,14 @@ class FioTestCase(unittest.TestCase):
             'ramp_time': 10
         }
         args = {'options': options}
+        result = {}
+
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['write'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
 
         expected_result = '{"write_bw": 35107, "write_iops": 8776,'\
             '"write_lat": 111.74}'
@@ -130,13 +136,14 @@ class FioTestCase(unittest.TestCase):
             'options': options,
             'sla': {'write_lat': 300.1}
         }
+        result = {}
 
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
 
         expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
             '"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -158,12 +165,13 @@ class FioTestCase(unittest.TestCase):
             'options': options,
             'sla': {'write_lat': 200.1}
         }
+        result = {}
 
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_fio_successful_bw_iops_sla(self, mock_ssh):
 
@@ -178,13 +186,14 @@ class FioTestCase(unittest.TestCase):
             'options': options,
             'sla': {'read_iops': 20000}
         }
+        result = {}
 
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
 
-        result = p.run(args)
+        p.run(args, result)
 
         expected_result = '{"read_bw": 83888, "read_iops": 20972,' \
             '"read_lat": 236.8, "write_bw": 84182, "write_iops": 21045,'\
@@ -205,12 +214,13 @@ class FioTestCase(unittest.TestCase):
             'options': options,
             'sla': {'read_iops': 30000}
         }
+        result = {}
 
         p.client = mock_ssh.SSH()
 
         sample_output = self._read_sample_output(self.sample_output['rw'])
         mock_ssh.SSH().execute.return_value = (0, sample_output, '')
-        self.assertRaises(AssertionError, p.run, args)
+        self.assertRaises(AssertionError, p.run, args, result)
 
     def test_fio_unsuccessful_script_error(self, mock_ssh):
 
@@ -222,10 +232,12 @@ class FioTestCase(unittest.TestCase):
             'ramp_time': 10
         }
         args = {'options': options}
+        result = {}
+
         p.client = mock_ssh.SSH()
 
         mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
-        self.assertRaises(RuntimeError, p.run, args)
+        self.assertRaises(RuntimeError, p.run, args, result)
 
     def _read_sample_output(self, file_name):
         curr_path = os.path.dirname(os.path.abspath(__file__))
index 3f5b640..68c8bfd 100755 (executable)
@@ -63,7 +63,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
         errors = ""
 
         try:
-            data = method(scenario_cfg)
+            method(scenario_cfg, data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
index af5aae8..e4ad037 100644 (file)
@@ -54,7 +54,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
         errors = ""
 
         try:
-            data = method(scenario_cfg)
+            method(scenario_cfg, data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
index 3a6b2e1..b6d861d 100755 (executable)
@@ -53,7 +53,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
         errors = ""
 
         try:
-            data = method(scenario_cfg)
+            method(scenario_cfg, data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
index ac8fe14..29f86e1 100644 (file)
@@ -63,7 +63,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg):
         errors = ""
 
         try:
-            data = method(scenario_cfg)
+            method(scenario_cfg, data)
         except AssertionError as assertion:
             # SLA validation failed in scenario, determine what to do now
             if sla_action == "assert":
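
All four runners receive the identical one-line change. A simplified sketch of the worker loop (names follow the runner code above) shows why it matters: the worker owns the dict and the scenario mutates it in place, so the measurements survive the AssertionError that signals an SLA breach and can still be recorded:

    def _worker_sketch(method, scenario_cfg, sla_action="monitor"):
        # Simplified from the runners above: the worker creates the dict,
        # the scenario fills it, and an SLA AssertionError no longer
        # discards the collected data.
        data = {}
        errors = ""
        try:
            method(scenario_cfg, data)
        except AssertionError as assertion:
            if sla_action == "assert":
                raise
            errors = str(assertion)  # "monitor": note the breach, keep data
        return data, errors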
index aaa98b8..595986f 100644 (file)
@@ -78,7 +78,7 @@ class Cyclictest(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
         default_args = "-m -n -q"
 
@@ -102,19 +102,20 @@ class Cyclictest(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
-        data = json.loads(stdout)
+        result.update(json.loads(stdout))
 
         if "sla" in args:
-            for t, latency in data.items():
+            sla_error = ""
+            for t, latency in result.items():
                 if 'max_%s_latency' % t not in args['sla']:
                     continue
 
                 sla_latency = int(args['sla']['max_%s_latency' % t])
                 latency = int(latency)
-                assert latency <= sla_latency, "%s latency %d > " \
-                    "sla:max_%s_latency(%d)" % (t, latency, t, sla_latency)
-
-        return data
+                if latency > sla_latency:
+                    sla_error += "%s latency %d > sla:max_%s_latency(%d); " % \
+                        (t, latency, t, sla_latency)
+            assert sla_error == "", sla_error
 
 
 def _test():
index 3677391..d2558c9 100644 (file)
@@ -58,7 +58,7 @@ class Lmbench(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         if not self.setup_done:
@@ -75,16 +75,17 @@ class Lmbench(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
-        data = json.loads(stdout)
+        result.update({"latencies": json.loads(stdout)})
 
         if "sla" in args:
+            sla_error = ""
             sla_max_latency = int(args['sla']['max_latency'])
-            for result in data:
-                latency = result['latency']
-                assert latency <= sla_max_latency, "latency %f > " \
-                    "sla:max_latency(%f)" % (latency, sla_max_latency)
-
-        return data
+            for t_latency in result["latencies"]:
+                latency = t_latency['latency']
+                if latency > sla_max_latency:
+                    sla_error += "latency %f > sla:max_latency(%f); " \
+                        % (latency, sla_max_latency)
+            assert sla_error == "", sla_error
 
 
 def _test():
index a874ea9..281bd8e 100644 (file)
@@ -58,7 +58,7 @@ class Perf(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         if not self.setup_done:
@@ -96,23 +96,22 @@ class Perf(base.Scenario):
         if status:
             raise RuntimeError(stdout)
 
-        output = json.loads(stdout)
+        result.update(json.loads(stdout))
 
         if "sla" in args:
             metric = args['sla']['metric']
             exp_val = args['sla']['expected_value']
             smaller_than_exp = 'smaller_than_expected' in args['sla']
 
-            if metric not in output:
+            if metric not in result:
                 assert False, "Metric (%s) not found." % metric
             else:
                 if smaller_than_exp:
-                    assert output[metric] < exp_val, "%s %d >= %d (sla)" \
-                        % (metric, output[metric], exp_val)
+                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
+                        % (metric, result[metric], exp_val)
                 else:
-                    assert output[metric] >= exp_val, "%s %d < %d (sla)" \
-                        % (metric, output[metric], exp_val)
-        return output
+                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
+                        % (metric, result[metric], exp_val)
 
 
 def _test():
index e31a892..a324c5b 100644 (file)
@@ -82,7 +82,7 @@ For more info see http://software.es.net/iperf
             LOG.warn(stderr)
         self.target.close()
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         # if run by a duration runner, get the duration time and setup as arg
@@ -122,7 +122,7 @@ For more info see http://software.es.net/iperf
             # error cause in json dict on stdout
             raise RuntimeError(stdout)
 
-        output = json.loads(stdout)
+        result.update(json.loads(stdout))
 
         if "sla" in args:
             sla_iperf = args["sla"]
@@ -131,21 +131,19 @@ For more info see http://software.es.net/iperf
 
                 # convert bits per second to bytes per second
                 bit_per_second = \
-                    int(output["end"]["sum_received"]["bits_per_second"])
+                    int(result["end"]["sum_received"]["bits_per_second"])
                 bytes_per_second = bit_per_second / 8
                 assert bytes_per_second >= sla_bytes_per_second, \
-                    "bytes_per_second %d < sla:bytes_per_second (%d)" % \
+                    "bytes_per_second %d < sla:bytes_per_second (%d)" % \
                     (bytes_per_second, sla_bytes_per_second)
             else:
                 sla_jitter = float(sla_iperf["jitter"])
 
-                jitter_ms = float(output["end"]["sum"]["jitter_ms"])
+                jitter_ms = float(result["end"]["sum"]["jitter_ms"])
                 assert jitter_ms <= sla_jitter, \
-                    "jitter_ms  %f > sla:jitter %f" % \
+                    "jitter_ms  %f > sla:jitter %f" % \
                     (jitter_ms, sla_jitter)
 
-        return output
-
 
 def _test():
     '''internal test function'''
index 3121fda..fb54970 100755 (executable)
@@ -79,7 +79,7 @@ class Netperf(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         if not self.setup_done:
@@ -118,21 +118,20 @@ class Netperf(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
-        data = json.loads(stdout)
-        if data['mean_latency'] == '':
+        result.update(json.loads(stdout))
+
+        if result['mean_latency'] == '':
             raise RuntimeError(stdout)
 
         # sla check
-        mean_latency = float(data['mean_latency'])
+        mean_latency = float(result['mean_latency'])
         if "sla" in args:
             sla_max_mean_latency = int(args["sla"]["mean_latency"])
 
             assert mean_latency <= sla_max_mean_latency, \
-                "mean_latency %f > sla_max_mean_latency(%f)" % \
+                "mean_latency %f > sla_max_mean_latency(%f)" % \
                 (mean_latency, sla_max_mean_latency)
 
-        return data
-
 
 def _test():
     '''internal test function'''
index 41395d8..1096435 100644 (file)
@@ -45,7 +45,7 @@ class Ping(base.Scenario):
         self.connection = ssh.SSH(user, host, key_filename=key_filename)
         self.connection.wait()
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         if "options" in args:
@@ -64,11 +64,9 @@ class Ping(base.Scenario):
         if exit_status != 0:
             raise RuntimeError(stderr)
 
-        rtt = float(stdout)
+        result["rtt"] = float(stdout)
 
         if "sla" in args:
             sla_max_rtt = int(args["sla"]["max_rtt"])
-            assert rtt <= sla_max_rtt, "rtt %f > sla:max_rtt(%f)" % \
-                (rtt, sla_max_rtt)
-
-        return rtt
+            assert result["rtt"] <= sla_max_rtt, "rtt %f > sla:max_rtt(%f); " % \
+                (result["rtt"], sla_max_rtt)
index cc28b51..f373fd2 100644 (file)
@@ -86,7 +86,7 @@ class Pktgen(base.Scenario):
             raise RuntimeError(stderr)
         return int(stdout)
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
 
         if not self.setup_done:
@@ -119,20 +119,18 @@ class Pktgen(base.Scenario):
         if status:
             raise RuntimeError(stderr)
 
-        data = json.loads(stdout)
+        result.update(json.loads(stdout))
 
-        data['packets_received'] = self._iptables_get_result()
+        result['packets_received'] = self._iptables_get_result()
 
         if "sla" in args:
-            sent = data['packets_sent']
-            received = data['packets_received']
+            sent = result['packets_sent']
+            received = result['packets_received']
             ppm = 1000000 * (sent - received) / sent
             sla_max_ppm = int(args["sla"]["max_ppm"])
-            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d" \
+            assert ppm <= sla_max_ppm, "ppm %d > sla_max_ppm %d" \
                 % (ppm, sla_max_ppm)
 
-        return data
-
 
 def _test():
     '''internal test function'''
index 1107a8b..af90b07 100644 (file)
@@ -71,11 +71,10 @@ class Fio(base.Scenario):
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, args, result):
         """execute the benchmark"""
         default_args = "-ioengine=libaio -direct=1 -group_reporting " \
             "-numjobs=1 -time_based --output-format=json"
-        result = {}
 
         if not self.setup_done:
             self.setup()
@@ -124,6 +123,7 @@ class Fio(base.Scenario):
             result["write_lat"] = raw_data["jobs"][0]["write"]["lat"]["mean"]
 
         if "sla" in args:
+            sla_error = ""
             for k, v in result.items():
                 if k not in args['sla']:
                     continue
@@ -131,15 +131,16 @@ class Fio(base.Scenario):
                 if "lat" in k:
                    # For latency small value is better
                     max_v = float(args['sla'][k])
-                    assert v <= max_v, "%s %f > " \
-                        "sla:%s(%f)" % (k, v, k, max_v)
+                    if v > max_v:
+                        sla_error += "%s %f > sla:%s(%f); " % (k, v, k, max_v)
                 else:
                     # For bandwidth and iops big value is better
                     min_v = int(args['sla'][k])
-                    assert v >= min_v, "%s %d < " \
-                        "sla:%s(%d)" % (k, v, k, min_v)
+                    if v < min_v:
+                        sla_error += "%s %d < " \
+                            "sla:%s(%d); " % (k, v, k, min_v)
 
-        return result
+            assert sla_error == "", sla_error
 
 
 def _test():
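
fio is the one scenario with SLA thresholds in both directions: latency keys are upper bounds, while bandwidth and IOPS keys are lower bounds, and both kinds now feed the same aggregated sla_error string. A standalone sketch of that dual check, using the rw sample values from the unit tests above:

    result = {"read_bw": 83888, "read_iops": 20972, "read_lat": 236.8,
              "write_bw": 84182, "write_iops": 21045, "write_lat": 233.55}
    sla = {"write_lat": 200.1, "read_iops": 30000}

    sla_error = ""
    for k, v in result.items():
        if k not in sla:
            continue
        if "lat" in k:
            # For latency a small value is better: the SLA is an upper bound.
            if v > float(sla[k]):
                sla_error += "%s %f > sla:%s(%f); " % (k, v, k, float(sla[k]))
        else:
            # For bandwidth and IOPS a big value is better: a lower bound.
            if v < int(sla[k]):
                sla_error += "%s %d < sla:%s(%d); " % (k, v, k, int(sla[k]))

    # sla_error now lists both violations in one message (order follows
    # dict iteration), and the scenario feeds it to a single assert.
    assert sla_error == "", sla_error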
index 0455386..91dd521 100644 (file)
@@ -125,7 +125,7 @@ class Plotter(object):
 
     def _plot_ping(self, records):
         '''ping test result interpretation and visualization on the graph'''
-        rtts = [r['benchmark']['data'] for r in records]
+        rtts = [r['benchmark']['data']['rtt'] for r in records]
         seqs = [r['benchmark']['sequence'] for r in records]
 
         for i in range(0, len(rtts)):
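
Since ping now records a dict instead of a bare float, the plotter indexes into it. A minimal sketch, with hypothetical records shaped like the dispatcher output the plotter consumes:

    records = [
        {"benchmark": {"sequence": 1, "data": {"rtt": 100.0}}},
        {"benchmark": {"sequence": 2, "data": {"rtt": 102.5}}},
    ]

    rtts = [r["benchmark"]["data"]["rtt"] for r in records]
    seqs = [r["benchmark"]["sequence"] for r in records]
    assert rtts == [100.0, 102.5] and seqs == [1, 2]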