Split Yardstick CLI from Yardstick core logic 77/26577/4
author chenjiankun <chenjiankun1@huawei.com>
Thu, 29 Dec 2016 11:45:12 +0000 (11:45 +0000)
committer chenjiankun <chenjiankun1@huawei.com>
Fri, 30 Dec 2016 16:55:11 +0000 (16:55 +0000)
JIRA: YARDSTICK-511

We need to unify the Yardstick entry points. The plan is for the CLI to
call the API, as Nova does.
This is the first step: decoupling the Yardstick core logic from the CLI.
The core logic moves to yardstick/benchmark/core, and the CLI now calls it
through a plain object instead of implementing it directly.

Change-Id: I84f10d2134635880c281cc63212a8533f2dd7d4e
Signed-off-by: chenjiankun <chenjiankun1@huawei.com>
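
In short, each CLI command class keeps only its option wiring and hands the
work to a plain class under yardstick/benchmark/core. A condensed sketch of
the glue this change introduces (simplified from the diff below; decorators
and the other commands are omitted):

    # yardstick/cmd/commands/__init__.py -- bridge argparse/oslo args to a core Param object
    from yardstick.benchmark.core import Param


    def change_osloobj_to_paras(args):
        param = Param({})
        for k in param.__dict__:          # copy only the attributes Param knows about
            if hasattr(args, k):
                setattr(param, k, getattr(args, k))
        return param

    # yardstick/cmd/commands/plugin.py -- the CLI layer now just delegates
    from yardstick.benchmark.core.plugin import Plugin


    class PluginCommands(object):
        def do_install(self, args):
            '''Install a plugin.'''
            Plugin().install(change_osloobj_to_paras(args))
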
20 files changed:
tests/unit/benchmark/core/__init__.py [moved from tests/unit/cmd/commands/__init__.py with 100% similarity]
tests/unit/benchmark/core/no_constraint_no_args_scenario_sample.yaml [moved from tests/unit/cmd/commands/no_constraint_no_args_scenario_sample.yaml with 100% similarity]
tests/unit/benchmark/core/no_constraint_with_args_scenario_sample.yaml [moved from tests/unit/cmd/commands/no_constraint_with_args_scenario_sample.yaml with 100% similarity]
tests/unit/benchmark/core/test_plugin.py [moved from tests/unit/cmd/commands/test_plugin.py with 80% similarity]
tests/unit/benchmark/core/test_task.py [moved from tests/unit/cmd/commands/test_task.py with 69% similarity]
tests/unit/benchmark/core/test_testcase.py [moved from tests/unit/cmd/commands/test_testcase.py with 62% similarity]
tests/unit/benchmark/core/with_constraint_no_args_scenario_sample.yaml [moved from tests/unit/cmd/commands/with_constraint_no_args_scenario_sample.yaml with 100% similarity]
tests/unit/benchmark/core/with_constraint_with_args_scenario_sample.yaml [moved from tests/unit/cmd/commands/with_constraint_with_args_scenario_sample.yaml with 100% similarity]
yardstick/benchmark/core/__init__.py [new file with mode: 0644]
yardstick/benchmark/core/plugin.py [new file with mode: 0644]
yardstick/benchmark/core/runner.py [new file with mode: 0644]
yardstick/benchmark/core/scenario.py [new file with mode: 0644]
yardstick/benchmark/core/task.py [new file with mode: 0644]
yardstick/benchmark/core/testcase.py [new file with mode: 0644]
yardstick/cmd/commands/__init__.py
yardstick/cmd/commands/plugin.py
yardstick/cmd/commands/runner.py
yardstick/cmd/commands/scenario.py
yardstick/cmd/commands/task.py
yardstick/cmd/commands/testcase.py

similarity index 80%
rename from tests/unit/cmd/commands/test_plugin.py
rename to tests/unit/benchmark/core/test_plugin.py
index 2e823fd..441116a 100644
@@ -9,12 +9,12 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Unittest for yardstick.cmd.commands.plugin
+# Unittest for yardstick.benchmark.core.plugin
 
 import mock
 import unittest
 
-from yardstick.cmd.commands import plugin
+from yardstick.benchmark.core import plugin
 
 
 class Arg(object):
@@ -22,30 +22,30 @@ class Arg(object):
         self.input_file = ('plugin/sample_config.yaml',)
 
 
-@mock.patch('yardstick.cmd.commands.plugin.ssh')
-class pluginCommandsTestCase(unittest.TestCase):
+@mock.patch('yardstick.benchmark.core.plugin.ssh')
+class pluginTestCase(unittest.TestCase):
 
     def setUp(self):
         self.result = {}
 
-    def test_do_install(self, mock_ssh):
-        p = plugin.PluginCommands()
+    def test_install(self, mock_ssh):
+        p = plugin.Plugin()
         mock_ssh.SSH().execute.return_value = (0, '', '')
         input_file = Arg()
-        p.do_install(input_file)
+        p.install(input_file)
         expected_result = {}
         self.assertEqual(self.result, expected_result)
 
-    def test_do_remove(self, mock_ssh):
-        p = plugin.PluginCommands()
+    def test_remove(self, mock_ssh):
+        p = plugin.Plugin()
         mock_ssh.SSH().execute.return_value = (0, '', '')
         input_file = Arg()
-        p.do_remove(input_file)
+        p.remove(input_file)
         expected_result = {}
         self.assertEqual(self.result, expected_result)
 
     def test_install_setup_run(self, mock_ssh):
-        p = plugin.PluginCommands()
+        p = plugin.Plugin()
         mock_ssh.SSH().execute.return_value = (0, '', '')
         plugins = {
             "name": "sample"
@@ -64,7 +64,7 @@ class pluginCommandsTestCase(unittest.TestCase):
         self.assertEqual(self.result, expected_result)
 
     def test_remove_setup_run(self, mock_ssh):
-        p = plugin.PluginCommands()
+        p = plugin.Plugin()
         mock_ssh.SSH().execute.return_value = (0, '', '')
         plugins = {
             "name": "sample"
@@ -81,3 +81,11 @@ class pluginCommandsTestCase(unittest.TestCase):
         p._run(plugin_name)
         expected_result = {}
         self.assertEqual(self.result, expected_result)
+
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
similarity index 69%
rename from tests/unit/cmd/commands/test_task.py
rename to tests/unit/benchmark/core/test_task.py
index 0177fd0..463c43e 100644
@@ -9,18 +9,18 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-# Unittest for yardstick.cmd.commands.task
+# Unittest for yardstick.benchmark.core.task
 
 import os
 import mock
 import unittest
 
-from yardstick.cmd.commands import task
+from yardstick.benchmark.core import task
 
 
-class TaskCommandsTestCase(unittest.TestCase):
+class TaskTestCase(unittest.TestCase):
 
-    @mock.patch('yardstick.cmd.commands.task.Context')
+    @mock.patch('yardstick.benchmark.core.task.Context')
     def test_parse_nodes_host_target_same_context(self, mock_context):
         nodes = {
             "host": "node1.LF",
@@ -38,42 +38,45 @@ class TaskCommandsTestCase(unittest.TestCase):
         self.assertEqual(context_cfg["host"], server_info)
         self.assertEqual(context_cfg["target"], server_info)
 
-    @mock.patch('yardstick.cmd.commands.task.Context')
-    @mock.patch('yardstick.cmd.commands.task.base_runner')
+    @mock.patch('yardstick.benchmark.core.task.Context')
+    @mock.patch('yardstick.benchmark.core.task.base_runner')
     def test_run(self, mock_base_runner, mock_ctx):
-        scenario = \
-            {'host': 'athena.demo',
-             'target': 'ares.demo',
-             'runner':
-                 {'duration': 60,
-                  'interval': 1,
-                  'type': 'Duration'
-                 },
-                 'type': 'Ping'}
-
-        t = task.TaskCommands()
+        scenario = {
+            'host': 'athena.demo',
+            'target': 'ares.demo',
+            'runner': {
+                'duration': 60,
+                'interval': 1,
+                'type': 'Duration'
+             },
+            'type': 'Ping'
+        }
+
+        t = task.Task()
         runner = mock.Mock()
         runner.join.return_value = 0
         mock_base_runner.Runner.get.return_value = runner
         t._run([scenario], False, "yardstick.out")
         self.assertTrue(runner.run.called)
 
-    @mock.patch('yardstick.cmd.commands.task.os')
+    @mock.patch('yardstick.benchmark.core.task.os')
     def test_check_precondition(self, mock_os):
-        cfg = \
-            {'precondition':
-                 {'installer_type': 'compass',
-                  'deploy_scenarios': 'os-nosdn',
-                  'pod_name': 'huawei-pod1'
-                 }
+        cfg = {
+            'precondition': {
+                'installer_type': 'compass',
+                'deploy_scenarios': 'os-nosdn',
+                'pod_name': 'huawei-pod1'
             }
+        }
 
         t = task.TaskParser('/opt')
-        mock_os.environ.get.side_effect = ['compass', 'os-nosdn', 'huawei-pod1']
+        mock_os.environ.get.side_effect = ['compass',
+                                           'os-nosdn',
+                                           'huawei-pod1']
         result = t._check_precondition(cfg)
         self.assertTrue(result)
 
-    @mock.patch('yardstick.cmd.commands.task.os.environ')
+    @mock.patch('yardstick.benchmark.core.task.os.environ')
     def test_parse_suite_no_constraint_no_args(self, mock_environ):
         SAMPLE_SCENARIO_PATH = "no_constraint_no_args_scenario_sample.yaml"
         t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -82,15 +85,15 @@ class TaskCommandsTestCase(unittest.TestCase):
         print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
                task_args_fnames))
         self.assertEqual(task_files[0],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
         self.assertEqual(task_files[1],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
         self.assertEqual(task_args[0], None)
         self.assertEqual(task_args[1], None)
         self.assertEqual(task_args_fnames[0], None)
         self.assertEqual(task_args_fnames[1], None)
 
-    @mock.patch('yardstick.cmd.commands.task.os.environ')
+    @mock.patch('yardstick.benchmark.core.task.os.environ')
     def test_parse_suite_no_constraint_with_args(self, mock_environ):
         SAMPLE_SCENARIO_PATH = "no_constraint_with_args_scenario_sample.yaml"
         t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -99,16 +102,16 @@ class TaskCommandsTestCase(unittest.TestCase):
         print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
                task_args_fnames))
         self.assertEqual(task_files[0],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
         self.assertEqual(task_files[1],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
         self.assertEqual(task_args[0], None)
         self.assertEqual(task_args[1],
-                        '{"host": "node1.LF","target": "node2.LF"}')
+                         '{"host": "node1.LF","target": "node2.LF"}')
         self.assertEqual(task_args_fnames[0], None)
         self.assertEqual(task_args_fnames[1], None)
 
-    @mock.patch('yardstick.cmd.commands.task.os.environ')
+    @mock.patch('yardstick.benchmark.core.task.os.environ')
     def test_parse_suite_with_constraint_no_args(self, mock_environ):
         SAMPLE_SCENARIO_PATH = "with_constraint_no_args_scenario_sample.yaml"
         t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -117,15 +120,15 @@ class TaskCommandsTestCase(unittest.TestCase):
         print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
                task_args_fnames))
         self.assertEqual(task_files[0],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
         self.assertEqual(task_files[1],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
         self.assertEqual(task_args[0], None)
         self.assertEqual(task_args[1], None)
         self.assertEqual(task_args_fnames[0], None)
         self.assertEqual(task_args_fnames[1], None)
 
-    @mock.patch('yardstick.cmd.commands.task.os.environ')
+    @mock.patch('yardstick.benchmark.core.task.os.environ')
     def test_parse_suite_with_constraint_with_args(self, mock_environ):
         SAMPLE_SCENARIO_PATH = "with_constraint_with_args_scenario_sample.yaml"
         t = task.TaskParser(self._get_file_abspath(SAMPLE_SCENARIO_PATH))
@@ -134,12 +137,12 @@ class TaskCommandsTestCase(unittest.TestCase):
         print ("files=%s, args=%s, fnames=%s" % (task_files, task_args,
                task_args_fnames))
         self.assertEqual(task_files[0],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc037.yaml')
         self.assertEqual(task_files[1],
-            'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
+                         'tests/opnfv/test_cases/opnfv_yardstick_tc043.yaml')
         self.assertEqual(task_args[0], None)
         self.assertEqual(task_args[1],
-                        '{"host": "node1.LF","target": "node2.LF"}')
+                         '{"host": "node1.LF","target": "node2.LF"}')
         self.assertEqual(task_args_fnames[0], None)
         self.assertEqual(task_args_fnames[1], None)
 
@@ -148,3 +151,10 @@ class TaskCommandsTestCase(unittest.TestCase):
         file_path = os.path.join(curr_path, filename)
         return file_path
 
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
similarity index 62%
rename from tests/unit/cmd/commands/test_testcase.py
rename to tests/unit/benchmark/core/test_testcase.py
index c55c367..6e0473c 100644
 
 # Unittest for yardstick.cmd.commands.testcase
 
-import mock
 import unittest
 
-from yardstick.cmd.commands import testcase
-from yardstick.cmd.commands.testcase import TestcaseCommands
+from yardstick.benchmark.core import testcase
+
 
 class Arg(object):
     def __init__(self):
-        self.casename=('opnfv_yardstick_tc001',)
+        self.casename = ('opnfv_yardstick_tc001',)
+
 
-class TestcaseCommandsUT(unittest.TestCase):
+class TestcaseUT(unittest.TestCase):
 
-    def test_do_list(self):
-        t = testcase.TestcaseCommands()
-        result = t.do_list("")
+    def test_list_all(self):
+        t = testcase.Testcase()
+        result = t.list_all("")
         self.assertEqual(result, True)
 
-    def test_do_show(self):
-        t = testcase.TestcaseCommands()
+    def test_show(self):
+        t = testcase.Testcase()
         casename = Arg()
-        result = t.do_show(casename)
+        result = t.show(casename)
         self.assertEqual(result, True)
 
+
+def main():
+    unittest.main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/yardstick/benchmark/core/__init__.py b/yardstick/benchmark/core/__init__.py
new file mode 100644
index 0000000..12c83f8
--- /dev/null
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from __future__ import print_function
+
+
+class Param(object):
+
+    def __init__(self, kwargs):
+        # list
+        self.inputfile = kwargs.get('inputfile')
+        self.task_args = kwargs.get('task-args')
+        self.task_args_file = kwargs.get('task-args-file')
+        self.keep_deploy = kwargs.get('keep-deploy')
+        self.parse_only = kwargs.get('parse-only')
+        self.output_file = kwargs.get('output-file', '/tmp/yardstick.out')
+        self.suite = kwargs.get('suite')
+
+        # list
+        self.input_file = kwargs.get('input_file')
+
+        # list
+        self.casename = kwargs.get('casename')
+
+        # list
+        self.type = kwargs.get('type')
+
+
+def print_hbar(barlen):
+    '''print to stdout a horizontal bar'''
+    print("+"),
+    print("-" * barlen),
+    print("+")
diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
new file mode 100644
index 0000000..da12ce4
--- /dev/null
@@ -0,0 +1,212 @@
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'plugin' """
+
+from __future__ import print_function
+import os
+import sys
+import yaml
+import time
+import logging
+import pkg_resources
+import yardstick.ssh as ssh
+
+from yardstick.common.task_template import TaskTemplate
+
+LOG = logging.getLogger(__name__)
+
+
+class Plugin(object):
+    """Plugin commands.
+
+       Set of commands to manage plugins.
+    """
+
+    def install(self, args):
+        """Install a plugin."""
+
+        total_start_time = time.time()
+        parser = PluginParser(args.input_file[0])
+
+        plugins, deployment = parser.parse_plugin()
+        plugin_name = plugins.get("name")
+        print("Installing plugin: %s" % plugin_name)
+
+        LOG.info("Executing _install_setup()")
+        self._install_setup(plugin_name, deployment)
+
+        LOG.info("Executing _run()")
+        self._run(plugin_name)
+
+        total_end_time = time.time()
+        LOG.info("total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        print("Done, exiting")
+
+    def remove(self, args):
+        """Remove a plugin."""
+
+        total_start_time = time.time()
+        parser = PluginParser(args.input_file[0])
+
+        plugins, deployment = parser.parse_plugin()
+        plugin_name = plugins.get("name")
+        print("Removing plugin: %s" % plugin_name)
+
+        LOG.info("Executing _remove_setup()")
+        self._remove_setup(plugin_name, deployment)
+
+        LOG.info("Executing _run()")
+        self._run(plugin_name)
+
+        total_end_time = time.time()
+        LOG.info("total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        print("Done, exiting")
+
+    def _install_setup(self, plugin_name, deployment):
+        """Deployment environment setup"""
+        target_script = plugin_name + ".bash"
+        self.script = pkg_resources.resource_filename(
+            'yardstick.resources', 'scripts/install/' + target_script)
+
+        deployment_user = deployment.get("user")
+        deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+        deployment_ip = deployment.get("ip", None)
+        deployment_password = deployment.get("password", None)
+        deployment_key_filename = deployment.get("key_filename",
+                                                 "/root/.ssh/id_rsa")
+
+        if deployment_ip == "local":
+            installer_ip = os.environ.get("INSTALLER_IP", None)
+
+            if deployment_password is not None:
+                self._login_via_password(deployment_user, installer_ip,
+                                         deployment_password,
+                                         deployment_ssh_port)
+            else:
+                self._login_via_key(self, deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+        else:
+            if deployment_password is not None:
+                self._login_via_password(deployment_user, deployment_ip,
+                                         deployment_password,
+                                         deployment_ssh_port)
+            else:
+                self._login_via_key(self, deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+        # copy script to host
+        remotepath = '~/%s.sh' % plugin_name
+
+        LOG.info("copying script to host: %s", remotepath)
+        self.client._put_file_shell(self.script, remotepath)
+
+    def _remove_setup(self, plugin_name, deployment):
+        """Deployment environment setup"""
+        target_script = plugin_name + ".bash"
+        self.script = pkg_resources.resource_filename(
+            'yardstick.resources', 'scripts/remove/' + target_script)
+
+        deployment_user = deployment.get("user")
+        deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
+        deployment_ip = deployment.get("ip", None)
+        deployment_password = deployment.get("password", None)
+        deployment_key_filename = deployment.get("key_filename",
+                                                 "/root/.ssh/id_rsa")
+
+        if deployment_ip == "local":
+            installer_ip = os.environ.get("INSTALLER_IP", None)
+
+            if deployment_password is not None:
+                self._login_via_password(deployment_user, installer_ip,
+                                         deployment_password,
+                                         deployment_ssh_port)
+            else:
+                self._login_via_key(self, deployment_user, installer_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+        else:
+            if deployment_password is not None:
+                self._login_via_password(deployment_user, deployment_ip,
+                                         deployment_password,
+                                         deployment_ssh_port)
+            else:
+                self._login_via_key(self, deployment_user, deployment_ip,
+                                    deployment_key_filename,
+                                    deployment_ssh_port)
+
+        # copy script to host
+        remotepath = '~/%s.sh' % plugin_name
+
+        LOG.info("copying script to host: %s", remotepath)
+        self.client._put_file_shell(self.script, remotepath)
+
+    def _login_via_password(self, user, ip, password, ssh_port):
+        LOG.info("Log in via pw, user:%s, host:%s", user, ip)
+        self.client = ssh.SSH(user, ip, password=password, port=ssh_port)
+        self.client.wait(timeout=600)
+
+    def _login_via_key(self, user, ip, key_filename, ssh_port):
+        LOG.info("Log in via key, user:%s, host:%s", user, ip)
+        self.client = ssh.SSH(user, ip, key_filename=key_filename,
+                              port=ssh_port)
+        self.client.wait(timeout=600)
+
+    def _run(self, plugin_name):
+        """Run installation script """
+        cmd = "sudo bash %s" % plugin_name + ".sh"
+
+        LOG.info("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+
+
+class PluginParser(object):
+    """Parser for plugin configration files in yaml format"""
+
+    def __init__(self, path):
+        self.path = path
+
+    def parse_plugin(self):
+        """parses the plugin file and return a plugins instance
+           and a deployment instance
+        """
+
+        print ("Parsing plugin config:", self.path)
+
+        try:
+            kw = {}
+            with open(self.path) as f:
+                try:
+                    input_plugin = f.read()
+                    rendered_plugin = TaskTemplate.render(input_plugin, **kw)
+                except Exception as e:
+                    print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
+                          % {"plugin": input_plugin, "err": e})
+                    raise e
+                print(("Input plugin is:\n%s\n") % rendered_plugin)
+
+                cfg = yaml.load(rendered_plugin)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "plugin")
+
+        return cfg["plugins"], cfg["deployment"]
+
+    def _check_schema(self, cfg_schema, schema_type):
+        """Check if configration file is using the correct schema type"""
+
+        if cfg_schema != "yardstick:" + schema_type + ":0.1":
+            sys.exit("error: file %s has unknown schema %s" % (self.path,
+                                                               cfg_schema))
diff --git a/yardstick/benchmark/core/runner.py b/yardstick/benchmark/core/runner.py
new file mode 100644
index 0000000..e8dd21a
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'runner' """
+
+from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core import print_hbar
+
+
+class Runners(object):
+    '''Runner commands.
+
+       Set of commands to discover and display runner types.
+    '''
+
+    def list_all(self, args):
+        '''List existing runner types'''
+        types = Runner.get_types()
+        print_hbar(78)
+        print("| %-16s | %-60s" % ("Type", "Description"))
+        print_hbar(78)
+        for rtype in types:
+            print "| %-16s | %-60s" % (rtype.__execution_type__,
+                                       rtype.__doc__.split("\n")[0])
+        print_hbar(78)
+
+    def show(self, args):
+        '''Show details of a specific runner type'''
+        rtype = Runner.get_cls(args.type[0])
+        print rtype.__doc__
diff --git a/yardstick/benchmark/core/scenario.py b/yardstick/benchmark/core/scenario.py
new file mode 100644
index 0000000..e228054
--- /dev/null
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'scenario' """
+
+from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core import print_hbar
+
+
+class Scenarios(object):
+    '''Scenario commands.
+
+       Set of commands to discover and display scenario types.
+    '''
+
+    def list_all(self, args):
+        '''List existing scenario types'''
+        types = Scenario.get_types()
+        print_hbar(78)
+        print("| %-16s | %-60s" % ("Type", "Description"))
+        print_hbar(78)
+        for stype in types:
+            print("| %-16s | %-60s" % (stype.__scenario_type__,
+                                       stype.__doc__.split("\n")[0]))
+        print_hbar(78)
+
+    def show(self, args):
+        '''Show details of a specific scenario type'''
+        stype = Scenario.get_cls(args.type[0])
+        print stype.__doc__
diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
new file mode 100644
index 0000000..397ba00
--- /dev/null
@@ -0,0 +1,484 @@
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'task' """
+
+import sys
+import os
+import yaml
+import atexit
+import ipaddress
+import time
+import logging
+import uuid
+import errno
+from itertools import ifilter
+
+from yardstick.benchmark.contexts.base import Context
+from yardstick.benchmark.runners import base as base_runner
+from yardstick.common.task_template import TaskTemplate
+from yardstick.common.utils import source_env
+from yardstick.common import constants
+
+output_file_default = "/tmp/yardstick.out"
+test_cases_dir_default = "tests/opnfv/test_cases/"
+LOG = logging.getLogger(__name__)
+
+
+class Task(object):     # pragma: no cover
+    '''Task commands.
+
+       Set of commands to manage benchmark tasks.
+    '''
+
+    def start(self, args, **kwargs):
+        '''Start a benchmark scenario.'''
+
+        atexit.register(atexit_handler)
+
+        self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
+
+        check_environment()
+
+        total_start_time = time.time()
+        parser = TaskParser(args.inputfile[0])
+
+        if args.suite:
+            # 1.parse suite, return suite_params info
+            task_files, task_args, task_args_fnames = \
+                parser.parse_suite()
+        else:
+            task_files = [parser.path]
+            task_args = [args.task_args]
+            task_args_fnames = [args.task_args_file]
+
+        LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
+                 task_files, task_args, task_args_fnames)
+
+        if args.parse_only:
+            sys.exit(0)
+
+        if os.path.isfile(args.output_file):
+            os.remove(args.output_file)
+        # parse task_files
+        for i in range(0, len(task_files)):
+            one_task_start_time = time.time()
+            parser.path = task_files[i]
+            scenarios, run_in_parallel, meet_precondition = parser.parse_task(
+                 self.task_id, task_args[i], task_args_fnames[i])
+
+            if not meet_precondition:
+                LOG.info("meet_precondition is %s, please check envrionment",
+                         meet_precondition)
+                continue
+
+            self._run(scenarios, run_in_parallel, args.output_file)
+
+            if args.keep_deploy:
+                # keep deployment, forget about stack
+                # (hide it for exit handler)
+                Context.list = []
+            else:
+                for context in Context.list:
+                    context.undeploy()
+                Context.list = []
+            one_task_end_time = time.time()
+            LOG.info("task %s finished in %d secs", task_files[i],
+                     one_task_end_time - one_task_start_time)
+
+        total_end_time = time.time()
+        LOG.info("total finished in %d secs",
+                 total_end_time - total_start_time)
+
+        print "Done, exiting"
+
+    def _run(self, scenarios, run_in_parallel, output_file):
+        '''Deploys context and calls runners'''
+        for context in Context.list:
+            context.deploy()
+
+        background_runners = []
+
+        # Start all background scenarios
+        for scenario in ifilter(_is_background_scenario, scenarios):
+            scenario["runner"] = dict(type="Duration", duration=1000000000)
+            runner = run_one_scenario(scenario, output_file)
+            background_runners.append(runner)
+
+        runners = []
+        if run_in_parallel:
+            for scenario in scenarios:
+                if not _is_background_scenario(scenario):
+                    runner = run_one_scenario(scenario, output_file)
+                    runners.append(runner)
+
+            # Wait for runners to finish
+            for runner in runners:
+                runner_join(runner)
+                print "Runner ended, output in", output_file
+        else:
+            # run serially
+            for scenario in scenarios:
+                if not _is_background_scenario(scenario):
+                    runner = run_one_scenario(scenario, output_file)
+                    runner_join(runner)
+                    print "Runner ended, output in", output_file
+
+        # Abort background runners
+        for runner in background_runners:
+            runner.abort()
+
+        # Wait for background runners to finish
+        for runner in background_runners:
+            if runner.join(timeout=60) is None:
+                # Nuke if it did not stop nicely
+                base_runner.Runner.terminate(runner)
+                runner_join(runner)
+            else:
+                base_runner.Runner.release(runner)
+            print "Background task ended"
+
+
+# TODO: Move stuff below into TaskCommands class !?
+
+
+class TaskParser(object):       # pragma: no cover
+    '''Parser for task config files in yaml format'''
+    def __init__(self, path):
+        self.path = path
+
+    def _meet_constraint(self, task, cur_pod, cur_installer):
+        if "constraint" in task:
+            constraint = task.get('constraint', None)
+            if constraint is not None:
+                tc_fit_pod = constraint.get('pod', None)
+                tc_fit_installer = constraint.get('installer', None)
+                LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
+                         cur_pod, cur_installer, constraint)
+                if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
+                    return False
+                if cur_installer and tc_fit_installer and \
+                        cur_installer not in tc_fit_installer:
+                    return False
+        return True
+
+    def _get_task_para(self, task, cur_pod):
+        task_args = task.get('task_args', None)
+        if task_args is not None:
+            task_args = task_args.get(cur_pod, None)
+        task_args_fnames = task.get('task_args_fnames', None)
+        if task_args_fnames is not None:
+            task_args_fnames = task_args_fnames.get(cur_pod, None)
+        return task_args, task_args_fnames
+
+    def parse_suite(self):
+        '''parse the suite file and return a list of task config file paths
+           and lists of optional parameters if present'''
+        LOG.info("\nParsing suite file:%s", self.path)
+
+        try:
+            with open(self.path) as stream:
+                cfg = yaml.load(stream)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "suite")
+        LOG.info("\nStarting scenario:%s", cfg["name"])
+
+        test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
+        if test_cases_dir[-1] != os.sep:
+            test_cases_dir += os.sep
+
+        cur_pod = os.environ.get('NODE_NAME', None)
+        cur_installer = os.environ.get('INSTALLER_TYPE', None)
+
+        valid_task_files = []
+        valid_task_args = []
+        valid_task_args_fnames = []
+
+        for task in cfg["test_cases"]:
+            # 1.check file_name
+            if "file_name" in task:
+                task_fname = task.get('file_name', None)
+                if task_fname is None:
+                    continue
+            else:
+                continue
+            # 2.check constraint
+            if self._meet_constraint(task, cur_pod, cur_installer):
+                valid_task_files.append(test_cases_dir + task_fname)
+            else:
+                continue
+            # 3.fetch task parameters
+            task_args, task_args_fnames = self._get_task_para(task, cur_pod)
+            valid_task_args.append(task_args)
+            valid_task_args_fnames.append(task_args_fnames)
+
+        return valid_task_files, valid_task_args, valid_task_args_fnames
+
+    def parse_task(self, task_id, task_args=None, task_args_file=None):
+        '''parse the task file and return context and scenario instances'''
+        print "Parsing task config:", self.path
+
+        try:
+            kw = {}
+            if task_args_file:
+                with open(task_args_file) as f:
+                    kw.update(parse_task_args("task_args_file", f.read()))
+            kw.update(parse_task_args("task_args", task_args))
+        except TypeError:
+            raise TypeError()
+
+        try:
+            with open(self.path) as f:
+                try:
+                    input_task = f.read()
+                    rendered_task = TaskTemplate.render(input_task, **kw)
+                except Exception as e:
+                    print(("Failed to render template:\n%(task)s\n%(err)s\n")
+                          % {"task": input_task, "err": e})
+                    raise e
+                print(("Input task is:\n%s\n") % rendered_task)
+
+                cfg = yaml.load(rendered_task)
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+        self._check_schema(cfg["schema"], "task")
+        meet_precondition = self._check_precondition(cfg)
+
+        # TODO: support one or many contexts? Many would be simpler and more precise
+        # TODO: support hybrid context type
+        if "context" in cfg:
+            context_cfgs = [cfg["context"]]
+        elif "contexts" in cfg:
+            context_cfgs = cfg["contexts"]
+        else:
+            context_cfgs = [{"type": "Dummy"}]
+
+        for cfg_attrs in context_cfgs:
+            context_type = cfg_attrs.get("type", "Heat")
+            if "Heat" == context_type and "networks" in cfg_attrs:
+                # bugfix: if there is more than one network,
+                # only add "external_network" to the first one.
+                # the name of the network should follow this rule:
+                # test, test2, test3 ...
+                # sort network with the length of network's name
+                sorted_networks = sorted(cfg_attrs["networks"].keys())
+                # config external_network based on env var
+                cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
+                    = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
+
+            context = Context.get(context_type)
+            context.init(cfg_attrs)
+
+        run_in_parallel = cfg.get("run_in_parallel", False)
+
+        # add tc and task id for influxdb extended tags
+        for scenario in cfg["scenarios"]:
+            task_name = os.path.splitext(os.path.basename(self.path))[0]
+            scenario["tc"] = task_name
+            scenario["task_id"] = task_id
+
+        # TODO we need something better here, a class that represent the file
+        return cfg["scenarios"], run_in_parallel, meet_precondition
+
+    def _check_schema(self, cfg_schema, schema_type):
+        '''Check if config file is using the correct schema type'''
+
+        if cfg_schema != "yardstick:" + schema_type + ":0.1":
+            sys.exit("error: file %s has unknown schema %s" % (self.path,
+                                                               cfg_schema))
+
+    def _check_precondition(self, cfg):
+        '''Check if the environment meets the precondition'''
+
+        if "precondition" in cfg:
+            precondition = cfg["precondition"]
+            installer_type = precondition.get("installer_type", None)
+            deploy_scenarios = precondition.get("deploy_scenarios", None)
+            tc_fit_pods = precondition.get("pod_name", None)
+            installer_type_env = os.environ.get('INSTALL_TYPE', None)
+            deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
+            pod_name_env = os.environ.get('NODE_NAME', None)
+
+            LOG.info("installer_type: %s, installer_type_env: %s",
+                     installer_type, installer_type_env)
+            LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
+                     deploy_scenarios, deploy_scenario_env)
+            LOG.info("tc_fit_pods: %s, pod_name_env: %s",
+                     tc_fit_pods, pod_name_env)
+            if installer_type and installer_type_env:
+                if installer_type_env not in installer_type:
+                    return False
+            if deploy_scenarios and deploy_scenario_env:
+                deploy_scenarios_list = deploy_scenarios.split(',')
+                for deploy_scenario in deploy_scenarios_list:
+                    if deploy_scenario_env.startswith(deploy_scenario):
+                        return True
+                return False
+            if tc_fit_pods and pod_name_env:
+                if pod_name_env not in tc_fit_pods:
+                    return False
+        return True
+
+
+def atexit_handler():
+    '''handler for process termination'''
+    base_runner.Runner.terminate_all()
+
+    if len(Context.list) > 0:
+        print "Undeploying all contexts"
+        for context in Context.list:
+            context.undeploy()
+
+
+def is_ip_addr(addr):
+    '''check if string addr is an IP address'''
+    try:
+        ipaddress.ip_address(unicode(addr))
+        return True
+    except ValueError:
+        return False
+
+
+def _is_same_heat_context(host_attr, target_attr):
+    '''check if two servers are in the same heat context
+    host_attr: either a name for a server created by yardstick or a dict
+    with attribute name mapping when using external heat templates
+    target_attr: either a name for a server created by yardstick or a dict
+    with attribute name mapping when using external heat templates
+    '''
+    host = None
+    target = None
+    for context in Context.list:
+        if context.__context_type__ != "Heat":
+            continue
+
+        host = context._get_server(host_attr)
+        if host is None:
+            continue
+
+        target = context._get_server(target_attr)
+        if target is None:
+            return False
+
+        # Both host and target is not None, then they are in the
+        # same heat context.
+        return True
+
+    return False
+
+
+def _is_background_scenario(scenario):
+    if "run_in_background" in scenario:
+        return scenario["run_in_background"]
+    else:
+        return False
+
+
+def run_one_scenario(scenario_cfg, output_file):
+    '''run one scenario using context'''
+    runner_cfg = scenario_cfg["runner"]
+    runner_cfg['output_filename'] = output_file
+
+    # TODO support get multi hosts/vms info
+    context_cfg = {}
+    if "host" in scenario_cfg:
+        context_cfg['host'] = Context.get_server(scenario_cfg["host"])
+
+    if "target" in scenario_cfg:
+        if is_ip_addr(scenario_cfg["target"]):
+            context_cfg['target'] = {}
+            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
+        else:
+            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
+            if _is_same_heat_context(scenario_cfg["host"],
+                                     scenario_cfg["target"]):
+                context_cfg["target"]["ipaddr"] = \
+                    context_cfg["target"]["private_ip"]
+            else:
+                context_cfg["target"]["ipaddr"] = \
+                    context_cfg["target"]["ip"]
+
+    if "targets" in scenario_cfg:
+        ip_list = []
+        for target in scenario_cfg["targets"]:
+            if is_ip_addr(target):
+                ip_list.append(target)
+                context_cfg['target'] = {}
+            else:
+                context_cfg['target'] = Context.get_server(target)
+                if _is_same_heat_context(scenario_cfg["host"], target):
+                    ip_list.append(context_cfg["target"]["private_ip"])
+                else:
+                    ip_list.append(context_cfg["target"]["ip"])
+        context_cfg['target']['ipaddr'] = ','.join(ip_list)
+
+    if "nodes" in scenario_cfg:
+        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
+    runner = base_runner.Runner.get(runner_cfg)
+
+    print "Starting runner of type '%s'" % runner_cfg["type"]
+    runner.run(scenario_cfg, context_cfg)
+
+    return runner
+
+
+def parse_nodes_with_context(scenario_cfg):
+    '''parse the 'nodes' fields in the scenario'''
+    nodes = scenario_cfg["nodes"]
+
+    nodes_cfg = {}
+    for nodename in nodes:
+        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
+
+    return nodes_cfg
+
+
+def runner_join(runner):
+    '''join (wait for) a runner, exit process at runner failure'''
+    status = runner.join()
+    base_runner.Runner.release(runner)
+    if status != 0:
+        sys.exit("Runner failed")
+
+
+def print_invalid_header(source_name, args):
+    print(("Invalid %(source)s passed:\n\n %(args)s\n")
+          % {"source": source_name, "args": args})
+
+
+def parse_task_args(src_name, args):
+    try:
+        kw = args and yaml.safe_load(args)
+        kw = {} if kw is None else kw
+    except yaml.parser.ParserError as e:
+        print_invalid_header(src_name, args)
+        print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
+              % {"source": src_name, "err": e})
+        raise TypeError()
+
+    if not isinstance(kw, dict):
+        print_invalid_header(src_name, args)
+        print(("%(src)s had to be dict, actually %(src_type)s\n")
+              % {"src": src_name, "src_type": type(kw)})
+        raise TypeError()
+    return kw
+
+
+def check_environment():
+    auth_url = os.environ.get('OS_AUTH_URL', None)
+    if not auth_url:
+        try:
+            source_env(constants.OPENSTACK_RC_FILE)
+        except IOError as e:
+            if e.errno != errno.EEXIST:
+                raise
+            LOG.debug('OPENRC file not found')
diff --git a/yardstick/benchmark/core/testcase.py b/yardstick/benchmark/core/testcase.py
new file mode 100644
index 0000000..d292ad2
--- /dev/null
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+""" Handler for yardstick command 'testcase' """
+import os
+import yaml
+import sys
+
+from yardstick.benchmark.core import print_hbar
+from yardstick.common.task_template import TaskTemplate
+from yardstick.definitions import YARDSTICK_ROOT_PATH
+
+
+class Testcase(object):
+    '''Testcase commands.
+
+       Set of commands to discover and display test cases.
+    '''
+    def __init__(self):
+        self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
+        self.testcase_list = []
+
+    def list_all(self, args):
+        '''List existing test cases'''
+
+        try:
+            testcase_files = os.listdir(self.test_case_path)
+        except Exception as e:
+            print(("Failed to list dir:\n%(path)s\n%(err)s\n")
+                  % {"path": self.test_case_path, "err": e})
+            raise e
+        testcase_files.sort()
+
+        for testcase_file in testcase_files:
+            record = self._get_record(testcase_file)
+            self.testcase_list.append(record)
+
+        self._format_print(self.testcase_list)
+        return True
+
+    def show(self, args):
+        '''Show details of a specific test case'''
+        testcase_name = args.casename[0]
+        testcase_path = self.test_case_path + testcase_name + ".yaml"
+        try:
+            with open(testcase_path) as f:
+                try:
+                    testcase_info = f.read()
+                    print testcase_info
+
+                except Exception as e:
+                    print(("Failed to load test cases:"
+                           "\n%(testcase_file)s\n%(err)s\n")
+                          % {"testcase_file": testcase_path, "err": e})
+                    raise e
+        except IOError as ioerror:
+            sys.exit(ioerror)
+        return True
+
+    def _get_record(self, testcase_file):
+
+        try:
+            with open(self.test_case_path + testcase_file) as f:
+                try:
+                    testcase_info = f.read()
+                except Exception as e:
+                    print(("Failed to load test cases:"
+                           "\n%(testcase_file)s\n%(err)s\n")
+                          % {"testcase_file": testcase_file, "err": e})
+                    raise e
+                description, installer, deploy_scenarios = \
+                    self._parse_testcase(testcase_info)
+
+                record = {'Name': testcase_file.split(".")[0],
+                          'Description': description,
+                          'installer': installer,
+                          'deploy_scenarios': deploy_scenarios}
+                return record
+        except IOError as ioerror:
+            sys.exit(ioerror)
+
+    def _parse_testcase(self, testcase_info):
+
+        kw = {}
+        rendered_testcase = TaskTemplate.render(testcase_info, **kw)
+        testcase_cfg = yaml.load(rendered_testcase)
+        test_precondition = testcase_cfg.get('precondition', None)
+        installer_type = 'all'
+        deploy_scenarios = 'all'
+        if test_precondition is not None:
+            installer_type = test_precondition.get('installer_type', 'all')
+            deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
+
+        description = testcase_info.split("\n")[2][1:].strip()
+        return description, installer_type, deploy_scenarios
+
+    def _format_print(self, testcase_list):
+        '''format output'''
+
+        print_hbar(88)
+        print("| %-21s | %-60s" % ("Testcase Name", "Description"))
+        print_hbar(88)
+        for testcase_record in testcase_list:
+            print "| %-16s | %-60s" % (testcase_record['Name'],
+                                       testcase_record['Description'])
+        print_hbar(88)
diff --git a/yardstick/cmd/commands/__init__.py b/yardstick/cmd/commands/__init__.py
index e69de29..ba229d4 100644
@@ -0,0 +1,9 @@
+from yardstick.benchmark.core import Param
+
+
+def change_osloobj_to_paras(args):
+    param = Param({})
+    for k in param.__dict__:
+        if hasattr(args, k):
+            setattr(param, k, getattr(args, k))
+    return param
diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py
index cf66125..9409566 100644
@@ -9,18 +9,9 @@
 
 """ Handler for yardstick command 'plugin' """
 
-import os
-import sys
-import yaml
-import time
-import logging
-import pkg_resources
-import yardstick.ssh as ssh
-
+from yardstick.benchmark.core.plugin import Plugin
 from yardstick.common.utils import cliargs
-from yardstick.common.task_template import TaskTemplate
-
-LOG = logging.getLogger(__name__)
+from yardstick.cmd.commands import change_osloobj_to_paras
 
 
 class PluginCommands(object):
@@ -33,184 +24,12 @@ class PluginCommands(object):
              nargs=1)
     def do_install(self, args):
         '''Install a plugin.'''
-
-        total_start_time = time.time()
-        parser = PluginParser(args.input_file[0])
-
-        plugins, deployment = parser.parse_plugin()
-        plugin_name = plugins.get("name")
-        print("Installing plugin: %s" % plugin_name)
-
-        LOG.info("Executing _install_setup()")
-        self._install_setup(plugin_name, deployment)
-
-        LOG.info("Executing _run()")
-        self._run(plugin_name)
-
-        total_end_time = time.time()
-        LOG.info("total finished in %d secs",
-                 total_end_time - total_start_time)
-
-        print("Done, exiting")
+        param = change_osloobj_to_paras(args)
+        Plugin().install(param)
 
     @cliargs("input_file", type=str, help="path to plugin configuration file",
              nargs=1)
     def do_remove(self, args):
         '''Remove a plugin.'''
-
-        total_start_time = time.time()
-        parser = PluginParser(args.input_file[0])
-
-        plugins, deployment = parser.parse_plugin()
-        plugin_name = plugins.get("name")
-        print("Removing plugin: %s" % plugin_name)
-
-        LOG.info("Executing _remove_setup()")
-        self._remove_setup(plugin_name, deployment)
-
-        LOG.info("Executing _run()")
-        self._run(plugin_name)
-
-        total_end_time = time.time()
-        LOG.info("total finished in %d secs",
-                 total_end_time - total_start_time)
-
-        print("Done, exiting")
-
-    def _install_setup(self, plugin_name, deployment):
-        '''Deployment environment setup'''
-        target_script = plugin_name + ".bash"
-        self.script = pkg_resources.resource_filename(
-            'yardstick.resources', 'scripts/install/' + target_script)
-
-        deployment_user = deployment.get("user")
-        deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
-        deployment_ip = deployment.get("ip", None)
-        deployment_password = deployment.get("password", None)
-        deployment_key_filename = deployment.get("key_filename",
-                                                 "/root/.ssh/id_rsa")
-
-        if deployment_ip == "local":
-            installer_ip = os.environ.get("INSTALLER_IP", None)
-
-            if deployment_password is not None:
-                self._login_via_password(deployment_user, installer_ip,
-                                         deployment_password,
-                                         deployment_ssh_port)
-            else:
-                self._login_via_key(self, deployment_user, installer_ip,
-                                    deployment_key_filename,
-                                    deployment_ssh_port)
-        else:
-            if deployment_password is not None:
-                self._login_via_password(deployment_user, deployment_ip,
-                                         deployment_password,
-                                         deployment_ssh_port)
-            else:
-                self._login_via_key(self, deployment_user, deployment_ip,
-                                    deployment_key_filename,
-                                    deployment_ssh_port)
-        # copy script to host
-        cmd = "cat > ~/%s.sh" % plugin_name
-
-        LOG.info("copying script to host: %s", cmd)
-        self.client.run(cmd, stdin=open(self.script, 'rb'))
-
-    def _remove_setup(self, plugin_name, deployment):
-        '''Deployment environment setup'''
-        target_script = plugin_name + ".bash"
-        self.script = pkg_resources.resource_filename(
-            'yardstick.resources', 'scripts/remove/' + target_script)
-
-        deployment_user = deployment.get("user")
-        deployment_ssh_port = deployment.get("ssh_port", ssh.DEFAULT_PORT)
-        deployment_ip = deployment.get("ip", None)
-        deployment_password = deployment.get("password", None)
-        deployment_key_filename = deployment.get("key_filename",
-                                                 "/root/.ssh/id_rsa")
-
-        if deployment_ip == "local":
-            installer_ip = os.environ.get("INSTALLER_IP", None)
-
-            if deployment_password is not None:
-                self._login_via_password(deployment_user, installer_ip,
-                                         deployment_password,
-                                         deployment_ssh_port)
-            else:
-                self._login_via_key(self, deployment_user, installer_ip,
-                                    deployment_key_filename,
-                                    deployment_ssh_port)
-        else:
-            if deployment_password is not None:
-                self._login_via_password(deployment_user, deployment_ip,
-                                         deployment_password,
-                                         deployment_ssh_port)
-            else:
-                self._login_via_key(self, deployment_user, deployment_ip,
-                                    deployment_key_filename,
-                                    deployment_ssh_port)
-
-        # copy script to host
-        cmd = "cat > ~/%s.sh" % plugin_name
-
-        LOG.info("copying script to host: %s", cmd)
-        self.client.run(cmd, stdin=open(self.script, 'rb'))
-
-    def _login_via_password(self, user, ip, password, ssh_port):
-        LOG.info("Log in via pw, user:%s, host:%s", user, ip)
-        self.client = ssh.SSH(user, ip, password=password, port=ssh_port)
-        self.client.wait(timeout=600)
-
-    def _login_via_key(self, user, ip, key_filename, ssh_port):
-        LOG.info("Log in via key, user:%s, host:%s", user, ip)
-        self.client = ssh.SSH(user, ip, key_filename=key_filename,
-                              port=ssh_port)
-        self.client.wait(timeout=600)
-
-    def _run(self, plugin_name):
-        '''Run installation script '''
-        cmd = "sudo bash %s" % plugin_name + ".sh"
-
-        LOG.info("Executing command: %s", cmd)
-        status, stdout, stderr = self.client.execute(cmd)
-
-
-class PluginParser(object):
-    '''Parser for plugin configration files in yaml format'''
-
-    def __init__(self, path):
-        self.path = path
-
-    def parse_plugin(self):
-        '''parses the plugin file and return a plugins instance
-           and a deployment instance
-        '''
-
-        print "Parsing plugin config:", self.path
-
-        try:
-            kw = {}
-            with open(self.path) as f:
-                try:
-                    input_plugin = f.read()
-                    rendered_plugin = TaskTemplate.render(input_plugin, **kw)
-                except Exception as e:
-                    print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
-                          % {"plugin": input_plugin, "err": e})
-                    raise e
-                print(("Input plugin is:\n%s\n") % rendered_plugin)
-
-                cfg = yaml.load(rendered_plugin)
-        except IOError as ioerror:
-            sys.exit(ioerror)
-
-        self._check_schema(cfg["schema"], "plugin")
-
-        return cfg["plugins"], cfg["deployment"]
-
-    def _check_schema(self, cfg_schema, schema_type):
-        '''Check if the configuration file is using the correct schema type'''
-
-        if cfg_schema != "yardstick:" + schema_type + ":0.1":
-            sys.exit("error: file %s has unknown schema %s" % (self.path,
-                                                               cfg_schema))
+        param = change_osloobj_to_paras(args)
+        Plugin().remove(param)
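
Both rewritten handlers above hand their oslo-parsed arguments to change_osloobj_to_paras() before calling into the core Plugin object. That helper lives in yardstick/cmd/commands/__init__.py and is not shown in this hunk; the sketch below is only an illustration of the intended conversion, assuming the helper does nothing more than copy the parsed CLI attributes onto a plain parameter object (the Param class and the vars() call are assumptions, not the committed code).

# Illustrative sketch only; the committed helper in
# yardstick/cmd/commands/__init__.py may differ in detail.
class Param(object):
    """Plain container for the CLI arguments handed to the core classes."""
    def __init__(self, kwargs):
        # keep every attribute the CLI parsed so the core never touches oslo
        for key, value in kwargs.items():
            setattr(self, key, value)


def change_osloobj_to_paras(args):
    """Convert an oslo-parsed args object into a plain Param object."""
    return Param(vars(args))
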
index 84bc3c6..62a2082 100644 (file)
@@ -9,9 +9,9 @@
 
 """ Handler for yardstick command 'runner' """
 
-from yardstick.benchmark.runners.base import Runner
+from yardstick.benchmark.core.runner import Runners
 from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
 
 
 class RunnerCommands(object):
@@ -22,17 +22,11 @@ class RunnerCommands(object):
 
     def do_list(self, args):
         '''List existing runner types'''
-        types = Runner.get_types()
-        print_hbar(78)
-        print("| %-16s | %-60s" % ("Type", "Description"))
-        print_hbar(78)
-        for rtype in types:
-            print "| %-16s | %-60s" % (rtype.__execution_type__,
-                                       rtype.__doc__.split("\n")[0])
-        print_hbar(78)
+        param = change_osloobj_to_paras(args)
+        Runners().list_all(param)
 
     @cliargs("type", type=str, help="runner type", nargs=1)
     def do_show(self, args):
         '''Show details of a specific runner type'''
-        rtype = Runner.get_cls(args.type[0])
-        print rtype.__doc__
+        param = change_osloobj_to_paras(args)
+        Runners().show(param)
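
The table-printing logic deleted from do_list() and do_show() above is expected to reappear in the new core module. A sketch of yardstick/benchmark/core/runner.py, assuming the deleted bodies were carried over essentially unchanged and that print_hbar now sits next to the core classes (the import path for it is an assumption):

# Sketch of yardstick/benchmark/core/runner.py, assuming the CLI bodies deleted
# above moved over unchanged; the print_hbar import location is an assumption.
from yardstick.benchmark.runners.base import Runner
from yardstick.benchmark.core import print_hbar


class Runners(object):
    """Runner commands, now callable without the CLI layer."""

    def list_all(self, args):
        """List existing runner types."""
        types = Runner.get_types()
        print_hbar(78)
        print("| %-16s | %-60s" % ("Type", "Description"))
        print_hbar(78)
        for rtype in types:
            print("| %-16s | %-60s" % (rtype.__execution_type__,
                                       rtype.__doc__.split("\n")[0]))
        print_hbar(78)

    def show(self, args):
        """Show details of a specific runner type."""
        rtype = Runner.get_cls(args.type[0])
        print(rtype.__doc__)
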
index 00d46cf..6aa3a45 100644 (file)
@@ -9,9 +9,9 @@
 
 """ Handler for yardstick command 'scenario' """
 
-from yardstick.benchmark.scenarios.base import Scenario
+from yardstick.benchmark.core.scenario import Scenarios
 from yardstick.common.utils import cliargs
-from yardstick.cmd import print_hbar
+from yardstick.cmd.commands import change_osloobj_to_paras
 
 
 class ScenarioCommands(object):
@@ -22,17 +22,11 @@ class ScenarioCommands(object):
 
     def do_list(self, args):
         '''List existing scenario types'''
-        types = Scenario.get_types()
-        print_hbar(78)
-        print("| %-16s | %-60s" % ("Type", "Description"))
-        print_hbar(78)
-        for stype in types:
-            print("| %-16s | %-60s" % (stype.__scenario_type__,
-                                       stype.__doc__.split("\n")[0]))
-        print_hbar(78)
+        param = change_osloobj_to_paras(args)
+        Scenarios().list_all(param)
 
     @cliargs("type", type=str, help="runner type", nargs=1)
     def do_show(self, args):
         '''Show details of a specific scenario type'''
-        stype = Scenario.get_cls(args.type[0])
-        print stype.__doc__
+        param = change_osloobj_to_paras(args)
+        Scenarios().show(param)
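
The scenario handlers follow the same pattern as the runner ones. A side effect of the split is that the relocated classes can be driven straight from Python with no oslo parsing at all; a hypothetical usage sketch, assuming the core keeps the args.type[0] interface visible in the deleted code (the Namespace stand-in and the "Ping" type name are illustrative):

# Hypothetical usage sketch: call the relocated core directly, bypassing the CLI.
from argparse import Namespace

from yardstick.benchmark.core.scenario import Scenarios

Scenarios().list_all(None)                   # the old do_list() never used its args
Scenarios().show(Namespace(type=["Ping"]))   # same args.type[0] contract as do_show()
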
index 9524778..bd018bc 100644 (file)
@@ -8,28 +8,12 @@
 ##############################################################################
 
 """ Handler for yardstick command 'task' """
-
-import sys
-import os
-import yaml
-import atexit
-import ipaddress
-import time
-import logging
-import uuid
-import errno
-from itertools import ifilter
-
-from yardstick.benchmark.contexts.base import Context
-from yardstick.benchmark.runners import base as base_runner
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.task import Task
 from yardstick.common.utils import cliargs
-from yardstick.common.utils import source_env
-from yardstick.common import constants
+from yardstick.cmd.commands import change_osloobj_to_paras
+
 
 output_file_default = "/tmp/yardstick.out"
-test_cases_dir_default = "tests/opnfv/test_cases/"
-LOG = logging.getLogger(__name__)
 
 
 class TaskCommands(object):
@@ -55,447 +39,5 @@ class TaskCommands(object):
     @cliargs("--suite", help="process test suite file instead of a task file",
              action="store_true")
     def do_start(self, args, **kwargs):
-        '''Start a benchmark scenario.'''
-
-        atexit.register(atexit_handler)
-
-        self.task_id = kwargs.get('task_id', str(uuid.uuid4()))
-
-        check_environment()
-
-        total_start_time = time.time()
-        parser = TaskParser(args.inputfile[0])
-
-        if args.suite:
-            # 1.parse suite, return suite_params info
-            task_files, task_args, task_args_fnames = \
-                parser.parse_suite()
-        else:
-            task_files = [parser.path]
-            task_args = [args.task_args]
-            task_args_fnames = [args.task_args_file]
-
-        LOG.info("\ntask_files:%s, \ntask_args:%s, \ntask_args_fnames:%s",
-                 task_files, task_args, task_args_fnames)
-
-        if args.parse_only:
-            sys.exit(0)
-
-        if os.path.isfile(args.output_file):
-            os.remove(args.output_file)
-        # parse task_files
-        for i in range(0, len(task_files)):
-            one_task_start_time = time.time()
-            parser.path = task_files[i]
-            scenarios, run_in_parallel, meet_precondition = parser.parse_task(
-                 self.task_id, task_args[i], task_args_fnames[i])
-
-            if not meet_precondition:
-                LOG.info("meet_precondition is %s, please check envrionment",
-                         meet_precondition)
-                continue
-
-            self._run(scenarios, run_in_parallel, args.output_file)
-
-            if args.keep_deploy:
-                # keep deployment, forget about stack
-                # (hide it for exit handler)
-                Context.list = []
-            else:
-                for context in Context.list:
-                    context.undeploy()
-                Context.list = []
-            one_task_end_time = time.time()
-            LOG.info("task %s finished in %d secs", task_files[i],
-                     one_task_end_time - one_task_start_time)
-
-        total_end_time = time.time()
-        LOG.info("total finished in %d secs",
-                 total_end_time - total_start_time)
-
-        print "Done, exiting"
-
-    def _run(self, scenarios, run_in_parallel, output_file):
-        '''Deploys context and calls runners'''
-        for context in Context.list:
-            context.deploy()
-
-        background_runners = []
-
-        # Start all background scenarios
-        for scenario in ifilter(_is_background_scenario, scenarios):
-            scenario["runner"] = dict(type="Duration", duration=1000000000)
-            runner = run_one_scenario(scenario, output_file)
-            background_runners.append(runner)
-
-        runners = []
-        if run_in_parallel:
-            for scenario in scenarios:
-                if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
-                    runners.append(runner)
-
-            # Wait for runners to finish
-            for runner in runners:
-                runner_join(runner)
-                print "Runner ended, output in", output_file
-        else:
-            # run serially
-            for scenario in scenarios:
-                if not _is_background_scenario(scenario):
-                    runner = run_one_scenario(scenario, output_file)
-                    runner_join(runner)
-                    print "Runner ended, output in", output_file
-
-        # Abort background runners
-        for runner in background_runners:
-            runner.abort()
-
-        # Wait for background runners to finish
-        for runner in background_runners:
-            if runner.join(timeout=60) is None:
-                # Nuke if it did not stop nicely
-                base_runner.Runner.terminate(runner)
-                runner_join(runner)
-            else:
-                base_runner.Runner.release(runner)
-            print "Background task ended"
-
-
-# TODO: Move stuff below into TaskCommands class !?
-
-
-class TaskParser(object):
-    '''Parser for task config files in yaml format'''
-    def __init__(self, path):
-        self.path = path
-
-    def _meet_constraint(self, task, cur_pod, cur_installer):
-        if "constraint" in task:
-            constraint = task.get('constraint', None)
-            if constraint is not None:
-                tc_fit_pod = constraint.get('pod', None)
-                tc_fit_installer = constraint.get('installer', None)
-                LOG.info("cur_pod:%s, cur_installer:%s,tc_constraints:%s",
-                         cur_pod, cur_installer, constraint)
-                if cur_pod and tc_fit_pod and cur_pod not in tc_fit_pod:
-                    return False
-                if cur_installer and tc_fit_installer and \
-                        cur_installer not in tc_fit_installer:
-                    return False
-        return True
-
-    def _get_task_para(self, task, cur_pod):
-        task_args = task.get('task_args', None)
-        if task_args is not None:
-            task_args = task_args.get(cur_pod, None)
-        task_args_fnames = task.get('task_args_fnames', None)
-        if task_args_fnames is not None:
-            task_args_fnames = task_args_fnames.get(cur_pod, None)
-        return task_args, task_args_fnames
-
-    def parse_suite(self):
-        '''parse the suite file and return a list of task config file paths
-           and lists of optional parameters if present'''
-        LOG.info("\nParsing suite file:%s", self.path)
-
-        try:
-            with open(self.path) as stream:
-                cfg = yaml.load(stream)
-        except IOError as ioerror:
-            sys.exit(ioerror)
-
-        self._check_schema(cfg["schema"], "suite")
-        LOG.info("\nStarting scenario:%s", cfg["name"])
-
-        test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
-        if test_cases_dir[-1] != os.sep:
-            test_cases_dir += os.sep
-
-        cur_pod = os.environ.get('NODE_NAME', None)
-        cur_installer = os.environ.get('INSTALLER_TYPE', None)
-
-        valid_task_files = []
-        valid_task_args = []
-        valid_task_args_fnames = []
-
-        for task in cfg["test_cases"]:
-            # 1.check file_name
-            if "file_name" in task:
-                task_fname = task.get('file_name', None)
-                if task_fname is None:
-                    continue
-            else:
-                continue
-            # 2.check constraint
-            if self._meet_constraint(task, cur_pod, cur_installer):
-                valid_task_files.append(test_cases_dir + task_fname)
-            else:
-                continue
-            # 3.fetch task parameters
-            task_args, task_args_fnames = self._get_task_para(task, cur_pod)
-            valid_task_args.append(task_args)
-            valid_task_args_fnames.append(task_args_fnames)
-
-        return valid_task_files, valid_task_args, valid_task_args_fnames
-
-    def parse_task(self, task_id, task_args=None, task_args_file=None):
-        '''parses the task file and returns context and scenario instances'''
-        print "Parsing task config:", self.path
-
-        try:
-            kw = {}
-            if task_args_file:
-                with open(task_args_file) as f:
-                    kw.update(parse_task_args("task_args_file", f.read()))
-            kw.update(parse_task_args("task_args", task_args))
-        except TypeError:
-            raise TypeError()
-
-        try:
-            with open(self.path) as f:
-                try:
-                    input_task = f.read()
-                    rendered_task = TaskTemplate.render(input_task, **kw)
-                except Exception as e:
-                    print(("Failed to render template:\n%(task)s\n%(err)s\n")
-                          % {"task": input_task, "err": e})
-                    raise e
-                print(("Input task is:\n%s\n") % rendered_task)
-
-                cfg = yaml.load(rendered_task)
-        except IOError as ioerror:
-            sys.exit(ioerror)
-
-        self._check_schema(cfg["schema"], "task")
-        meet_precondition = self._check_precondition(cfg)
-
-        # TODO: support one or many contexts? Many would be simpler and more precise
-        # TODO: support hybrid context type
-        if "context" in cfg:
-            context_cfgs = [cfg["context"]]
-        elif "contexts" in cfg:
-            context_cfgs = cfg["contexts"]
-        else:
-            context_cfgs = [{"type": "Dummy"}]
-
-        for cfg_attrs in context_cfgs:
-            context_type = cfg_attrs.get("type", "Heat")
-            if "Heat" == context_type and "networks" in cfg_attrs:
-                # bugfix: if there is more than one network,
-                # only add "external_network" to the first one.
-                # the network names should follow this rule:
-                # test, test2, test3 ...
-                # networks are sorted by name so the first one gets it
-                sorted_networks = sorted(cfg_attrs["networks"].keys())
-                # config external_network based on env var
-                cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
-                    = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
-
-            context = Context.get(context_type)
-            context.init(cfg_attrs)
-
-        run_in_parallel = cfg.get("run_in_parallel", False)
-
-        # add tc and task id for influxdb extended tags
-        for scenario in cfg["scenarios"]:
-            task_name = os.path.splitext(os.path.basename(self.path))[0]
-            scenario["tc"] = task_name
-            scenario["task_id"] = task_id
-
-        # TODO we need something better here, a class that represents the file
-        return cfg["scenarios"], run_in_parallel, meet_precondition
-
-    def _check_schema(self, cfg_schema, schema_type):
-        '''Check if config file is using the correct schema type'''
-
-        if cfg_schema != "yardstick:" + schema_type + ":0.1":
-            sys.exit("error: file %s has unknown schema %s" % (self.path,
-                                                               cfg_schema))
-
-    def _check_precondition(self, cfg):
-        '''Check if the environment meets the precondition'''
-
-        if "precondition" in cfg:
-            precondition = cfg["precondition"]
-            installer_type = precondition.get("installer_type", None)
-            deploy_scenarios = precondition.get("deploy_scenarios", None)
-            tc_fit_pods = precondition.get("pod_name", None)
-            installer_type_env = os.environ.get('INSTALL_TYPE', None)
-            deploy_scenario_env = os.environ.get('DEPLOY_SCENARIO', None)
-            pod_name_env = os.environ.get('NODE_NAME', None)
-
-            LOG.info("installer_type: %s, installer_type_env: %s",
-                     installer_type, installer_type_env)
-            LOG.info("deploy_scenarios: %s, deploy_scenario_env: %s",
-                     deploy_scenarios, deploy_scenario_env)
-            LOG.info("tc_fit_pods: %s, pod_name_env: %s",
-                     tc_fit_pods, pod_name_env)
-            if installer_type and installer_type_env:
-                if installer_type_env not in installer_type:
-                    return False
-            if deploy_scenarios and deploy_scenario_env:
-                deploy_scenarios_list = deploy_scenarios.split(',')
-                for deploy_scenario in deploy_scenarios_list:
-                    if deploy_scenario_env.startswith(deploy_scenario):
-                        return True
-                return False
-            if tc_fit_pods and pod_name_env:
-                if pod_name_env not in tc_fit_pods:
-                    return False
-        return True
-
-
-def atexit_handler():
-    '''handler for process termination'''
-    base_runner.Runner.terminate_all()
-
-    if len(Context.list) > 0:
-        print "Undeploying all contexts"
-        for context in Context.list:
-            context.undeploy()
-
-
-def is_ip_addr(addr):
-    '''check if string addr is an IP address'''
-    try:
-        ipaddress.ip_address(unicode(addr))
-        return True
-    except ValueError:
-        return False
-
-
-def _is_same_heat_context(host_attr, target_attr):
-    '''check if two servers are in the same heat context
-    host_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    target_attr: either a name for a server created by yardstick or a dict
-    with attribute name mapping when using external heat templates
-    '''
-    host = None
-    target = None
-    for context in Context.list:
-        if context.__context_type__ != "Heat":
-            continue
-
-        host = context._get_server(host_attr)
-        if host is None:
-            continue
-
-        target = context._get_server(target_attr)
-        if target is None:
-            return False
-
-        # Both host and target are not None, so they are in the
-        # same heat context.
-        return True
-
-    return False
-
-
-def _is_background_scenario(scenario):
-    if "run_in_background" in scenario:
-        return scenario["run_in_background"]
-    else:
-        return False
-
-
-def run_one_scenario(scenario_cfg, output_file):
-    '''run one scenario using context'''
-    runner_cfg = scenario_cfg["runner"]
-    runner_cfg['output_filename'] = output_file
-
-    # TODO support get multi hosts/vms info
-    context_cfg = {}
-    if "host" in scenario_cfg:
-        context_cfg['host'] = Context.get_server(scenario_cfg["host"])
-
-    if "target" in scenario_cfg:
-        if is_ip_addr(scenario_cfg["target"]):
-            context_cfg['target'] = {}
-            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
-        else:
-            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
-            if _is_same_heat_context(scenario_cfg["host"],
-                                     scenario_cfg["target"]):
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["private_ip"]
-            else:
-                context_cfg["target"]["ipaddr"] = \
-                    context_cfg["target"]["ip"]
-
-    if "targets" in scenario_cfg:
-        ip_list = []
-        for target in scenario_cfg["targets"]:
-            if is_ip_addr(target):
-                ip_list.append(target)
-                context_cfg['target'] = {}
-            else:
-                context_cfg['target'] = Context.get_server(target)
-                if _is_same_heat_context(scenario_cfg["host"], target):
-                    ip_list.append(context_cfg["target"]["private_ip"])
-                else:
-                    ip_list.append(context_cfg["target"]["ip"])
-        context_cfg['target']['ipaddr'] = ','.join(ip_list)
-
-    if "nodes" in scenario_cfg:
-        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
-    runner = base_runner.Runner.get(runner_cfg)
-
-    print "Starting runner of type '%s'" % runner_cfg["type"]
-    runner.run(scenario_cfg, context_cfg)
-
-    return runner
-
-
-def parse_nodes_with_context(scenario_cfg):
-    '''parses the 'nodes' fields in the scenario'''
-    nodes = scenario_cfg["nodes"]
-
-    nodes_cfg = {}
-    for nodename in nodes:
-        nodes_cfg[nodename] = Context.get_server(nodes[nodename])
-
-    return nodes_cfg
-
-
-def runner_join(runner):
-    '''join (wait for) a runner, exit process at runner failure'''
-    status = runner.join()
-    base_runner.Runner.release(runner)
-    if status != 0:
-        sys.exit("Runner failed")
-
-
-def print_invalid_header(source_name, args):
-    print(("Invalid %(source)s passed:\n\n %(args)s\n")
-          % {"source": source_name, "args": args})
-
-
-def parse_task_args(src_name, args):
-    try:
-        kw = args and yaml.safe_load(args)
-        kw = {} if kw is None else kw
-    except yaml.parser.ParserError as e:
-        print_invalid_header(src_name, args)
-        print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
-              % {"source": src_name, "err": e})
-        raise TypeError()
-
-    if not isinstance(kw, dict):
-        print_invalid_header(src_name, args)
-        print(("%(src)s had to be dict, actually %(src_type)s\n")
-              % {"src": src_name, "src_type": type(kw)})
-        raise TypeError()
-    return kw
-
-
-def check_environment():
-    auth_url = os.environ.get('OS_AUTH_URL', None)
-    if not auth_url:
-        try:
-            source_env(constants.OPENSTACK_RC_FILE)
-        except IOError as e:
-            if e.errno != errno.EEXIST:
-                raise
-            LOG.debug('OPENRC file not found')
+        param = change_osloobj_to_paras(args)
+        Task().start(param)
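
Among the helpers deleted above, parse_task_args() is the one that defines the easy-to-miss contract for --task-args: the string must be YAML and must load to a mapping, otherwise TypeError is raised. A self-contained, slightly simplified restatement of that contract (the error-report printing and the yaml.parser.ParserError branch are stripped for brevity):

# Simplified restatement of the parse_task_args() contract removed above:
# the argument string must be YAML and must load to a dict.
import yaml


def parse_task_args(src_name, args):
    kw = args and yaml.safe_load(args)
    kw = {} if kw is None else kw
    if not isinstance(kw, dict):
        raise TypeError("%s has to be a YAML mapping, got %s" % (src_name, type(kw)))
    return kw


print(parse_task_args("task_args", '{"packetsize": 64, "rate": 100}'))
# {'packetsize': 64, 'rate': 100}
print(parse_task_args("task_args", None))
# {}
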
index cb76c7a..6ff7962 100644 (file)
@@ -8,14 +8,9 @@
 ##############################################################################
 
 """ Handler for yardstick command 'testcase' """
-import os
-import yaml
-import sys
-
-from yardstick.cmd import print_hbar
-from yardstick.common.task_template import TaskTemplate
+from yardstick.benchmark.core.testcase import Testcase
 from yardstick.common.utils import cliargs
-from yardstick.definitions import YARDSTICK_ROOT_PATH
+from yardstick.cmd.commands import change_osloobj_to_paras
 
 
 class TestcaseCommands(object):
@@ -23,92 +18,14 @@ class TestcaseCommands(object):
 
        Set of commands to discover and display test cases.
     '''
-    def __init__(self):
-        self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
-        self.testcase_list = []
 
     def do_list(self, args):
         '''List existing test cases'''
-
-        try:
-            testcase_files = os.listdir(self.test_case_path)
-        except Exception as e:
-            print(("Failed to list dir:\n%(path)s\n%(err)s\n")
-                  % {"path": self.test_case_path, "err": e})
-            raise e
-        testcase_files.sort()
-
-        for testcase_file in testcase_files:
-            record = self._get_record(testcase_file)
-            self.testcase_list.append(record)
-
-        self._format_print(self.testcase_list)
-        return True
+        param = change_osloobj_to_paras(args)
+        Testcase().list_all(param)
 
     @cliargs("casename", type=str, help="test case name", nargs=1)
     def do_show(self, args):
         '''Show details of a specific test case'''
-        testcase_name = args.casename[0]
-        testcase_path = self.test_case_path + testcase_name + ".yaml"
-        try:
-            with open(testcase_path) as f:
-                try:
-                    testcase_info = f.read()
-                    print testcase_info
-
-                except Exception as e:
-                    print(("Failed to load test cases:"
-                           "\n%(testcase_file)s\n%(err)s\n")
-                          % {"testcase_file": testcase_path, "err": e})
-                    raise e
-        except IOError as ioerror:
-            sys.exit(ioerror)
-        return True
-
-    def _get_record(self, testcase_file):
-
-        try:
-            with open(self.test_case_path + testcase_file) as f:
-                try:
-                    testcase_info = f.read()
-                except Exception as e:
-                    print(("Failed to load test cases:"
-                           "\n%(testcase_file)s\n%(err)s\n")
-                          % {"testcase_file": testcase_file, "err": e})
-                    raise e
-                description, installer, deploy_scenarios = \
-                    self._parse_testcase(testcase_info)
-
-                record = {'Name': testcase_file.split(".")[0],
-                          'Description': description,
-                          'installer': installer,
-                          'deploy_scenarios': deploy_scenarios}
-                return record
-        except IOError as ioerror:
-            sys.exit(ioerror)
-
-    def _parse_testcase(self, testcase_info):
-
-        kw = {}
-        rendered_testcase = TaskTemplate.render(testcase_info, **kw)
-        testcase_cfg = yaml.load(rendered_testcase)
-        test_precondition = testcase_cfg.get('precondition', None)
-        installer_type = 'all'
-        deploy_scenarios = 'all'
-        if test_precondition is not None:
-            installer_type = test_precondition.get('installer_type', 'all')
-            deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')
-
-        description = testcase_info.split("\n")[2][1:].strip()
-        return description, installer_type, deploy_scenarios
-
-    def _format_print(self, testcase_list):
-        '''format output'''
-
-        print_hbar(88)
-        print("| %-21s | %-60s" % ("Testcase Name", "Description"))
-        print_hbar(88)
-        for testcase_record in testcase_list:
-            print "| %-16s | %-60s" % (testcase_record['Name'],
-                                       testcase_record['Description'])
-        print_hbar(88)
+        param = change_osloobj_to_paras(args)
+        Testcase().show(param)
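
The metadata extraction that the deleted do_list() relied on, and that is now expected in yardstick/benchmark/core/testcase.py, has a quirk worth keeping in mind: the description is literally the third line of the test case file with its leading comment character stripped, and the optional precondition block narrows installer and deploy-scenario applicability. A self-contained illustration of that behaviour using a made-up test case (the sample content is invented, and the TaskTemplate.render() step of the real code is skipped):

# Illustration of the fields _parse_testcase() extracts; the sample file content
# is invented and the TaskTemplate rendering step is skipped.
import yaml

SAMPLE = """\
---
# Sample test case
# measure network latency between two VMs using ping
schema: "yardstick:task:0.1"
precondition:
  installer_type: compass
  deploy_scenarios: os-nosdn
scenarios: []
"""

cfg = yaml.safe_load(SAMPLE)
precondition = cfg.get('precondition') or {}

# description: third line of the file, leading '#' dropped, same slicing as the
# deleted code above
description = SAMPLE.split("\n")[2][1:].strip()

print(description)                                  # measure network latency between two VMs using ping
print(precondition.get('installer_type', 'all'))    # compass
print(precondition.get('deploy_scenarios', 'all'))  # os-nosdn
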