standardize ssh auth
diff --git a/yardstick/benchmark/scenarios/compute/perf.py b/yardstick/benchmark/scenarios/compute/perf.py
index 62b4297..0b8ed9b 100644
--- a/yardstick/benchmark/scenarios/compute/perf.py
+++ b/yardstick/benchmark/scenarios/compute/perf.py
@@ -6,15 +6,18 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import pkg_resources
+from __future__ import absolute_import
+from __future__ import print_function
+
 import logging
-import json
+
+import pkg_resources
+from oslo_serialization import jsonutils
 
 import yardstick.ssh as ssh
 from yardstick.benchmark.scenarios import base
 
 LOG = logging.getLogger(__name__)
-LOG.setLevel(logging.DEBUG)
 
 
 class Perf(base.Scenario):
@@ -37,35 +40,32 @@ class Perf(base.Scenario):
 
     TARGET_SCRIPT = 'perf_benchmark.bash'
 
-    def __init__(self, context):
-        self.context = context
+    def __init__(self, scenario_cfg, context_cfg):
+        self.scenario_cfg = scenario_cfg
+        self.context_cfg = context_cfg
         self.setup_done = False
 
     def setup(self):
         """scenario setup"""
         self.target_script = pkg_resources.resource_filename(
             'yardstick.benchmark.scenarios.compute', Perf.TARGET_SCRIPT)
-        user = self.context.get('user', 'ubuntu')
-        host = self.context.get('host', None)
-        key_filename = self.context.get('key_filename', '~/.ssh/id_rsa')
+        host = self.context_cfg['host']
 
-        LOG.debug("user:%s, host:%s", user, host)
-        self.client = ssh.SSH(user, host, key_filename=key_filename)
+        self.client = ssh.SSH.from_node(host, defaults={"user": "ubuntu"})
         self.client.wait(timeout=600)
 
         # copy script to host
-        self.client.run("cat > ~/perf_benchmark.sh",
-                        stdin=open(self.target_script, "rb"))
+        self.client._put_file_shell(self.target_script, '~/perf_benchmark.sh')
 
         self.setup_done = True
 
-    def run(self, args):
+    def run(self, result):
         """execute the benchmark"""
 
         if not self.setup_done:
             self.setup()
 
-        options = args['options']
+        options = self.scenario_cfg['options']
         events = options.get('events', ['task-clock'])
 
         events_string = ""
@@ -73,7 +73,8 @@ class Perf(base.Scenario):
             events_string += event + " "
 
         # if run by a duration runner
-        duration_time = self.context.get("duration", None)
+        duration_time = self.scenario_cfg["runner"].get("duration", None) \
+            if "runner" in self.scenario_cfg else None
         # if run by an arithmetic runner
         arithmetic_time = options.get("duration", None)
         if duration_time:
@@ -97,44 +98,48 @@ class Perf(base.Scenario):
         if status:
             raise RuntimeError(stdout)
 
-        output = json.loads(stdout)
+        result.update(jsonutils.loads(stdout))
 
-        if "sla" in args:
-            metric = args['sla']['metric']
-            exp_val = args['sla']['expected_value']
-            smaller_than_exp = 'smaller_than_expected' in args['sla']
+        if "sla" in self.scenario_cfg:
+            metric = self.scenario_cfg['sla']['metric']
+            exp_val = self.scenario_cfg['sla']['expected_value']
+            smaller_than_exp = 'smaller_than_expected' \
+                               in self.scenario_cfg['sla']
 
-            if metric not in output:
+            if metric not in result:
                 assert False, "Metric (%s) not found." % metric
             else:
                 if smaller_than_exp:
-                    assert output[metric] < exp_val, "%s %d >= %d (sla)" \
-                        % (metric, output[metric], exp_val)
+                    assert result[metric] < exp_val, "%s %d >= %d (sla); " \
+                        % (metric, result[metric], exp_val)
                 else:
-                    assert output[metric] >= exp_val, "%s %d < %d (sla)" \
-                        % (metric, output[metric], exp_val)
-        return output
+                    assert result[metric] >= exp_val, "%s %d < %d (sla); " \
+                        % (metric, result[metric], exp_val)
 
 
 def _test():
     """internal test function"""
     key_filename = pkg_resources.resource_filename('yardstick.resources',
                                                    'files/yardstick_key')
-    ctx = {'host': '172.16.0.137',
-           'user': 'ubuntu',
-           'key_filename': key_filename
-           }
+    ctx = {
+        'host': {
+            'ip': '10.229.47.137',
+            'user': 'root',
+            'key_filename': key_filename
+        }
+    }
 
     logger = logging.getLogger('yardstick')
     logger.setLevel(logging.DEBUG)
 
-    p = Perf(ctx)
-
     options = {'load': True}
     args = {'options': options}
+    result = {}
+
+    p = Perf(args, ctx)
+    p.run(result)
+    print(result)
 
-    result = p.run(args)
-    print result
 
 if __name__ == '__main__':
     _test()
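
For reference, a minimal sketch (not part of the commit) of the scenario_cfg and context_cfg dictionaries the reworked Perf scenario now consumes. The key names mirror the lookups in setup() and run() in the diff above; the address, credentials, event list and SLA figures are placeholders.

# Hypothetical inputs for Perf(scenario_cfg, context_cfg); key names follow
# the lookups in the diff, values are placeholders.
scenario_cfg = {
    'options': {
        'load': True,                       # same option used by _test() above
        'events': ['task-clock'],           # perf events; default is ['task-clock']
    },
    'runner': {'duration': 30},             # read when run by a duration runner
    'sla': {
        'metric': 'task-clock',
        'expected_value': 1000,
        # the mere presence of this key flips the check to "must stay below"
        'smaller_than_expected': True,
    },
}
context_cfg = {
    'host': {
        'ip': '192.0.2.10',                 # placeholder address
        'user': 'ubuntu',                   # from_node() also supplies this default
        'key_filename': '~/.ssh/id_rsa',    # placeholder key path
    },
}

p = Perf(scenario_cfg, context_cfg)
result = {}
p.run(result)   # perf_benchmark.bash output is merged into result via jsonutils.loads()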