Merge "Clone the corresponding branch of the repo according to the job branch"
author    Jing Lu <lvjing5@huawei.com>
Thu, 1 Dec 2016 01:11:09 +0000 (01:11 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
Thu, 1 Dec 2016 01:11:09 +0000 (01:11 +0000)
42 files changed:
api/actions/test.py
api/urls.py
api/utils/common.py
api/views.py
tests/unit/api/actions/test_env.py [deleted file]
tests/unit/api/actions/test_result.py [deleted file]
tests/unit/api/actions/test_test.py [deleted file]
tests/unit/api/test_views.py [deleted file]
tests/unit/api/utils/test_common.py
tests/unit/api/utils/test_daemonthread.py [deleted file]
tests/unit/test_ssh.py
yardstick/benchmark/contexts/node.py
yardstick/benchmark/runners/arithmetic.py
yardstick/benchmark/runners/base.py
yardstick/benchmark/runners/duration.py
yardstick/benchmark/runners/iteration.py
yardstick/benchmark/runners/sequence.py
yardstick/benchmark/scenarios/availability/actionrollbackers.py
yardstick/benchmark/scenarios/availability/attacker/attacker_baremetal.py
yardstick/benchmark/scenarios/availability/attacker/attacker_general.py
yardstick/benchmark/scenarios/availability/attacker/attacker_process.py
yardstick/benchmark/scenarios/availability/attacker/baseattacker.py
yardstick/benchmark/scenarios/availability/director.py
yardstick/benchmark/scenarios/availability/monitor/basemonitor.py
yardstick/benchmark/scenarios/availability/monitor/monitor_command.py
yardstick/benchmark/scenarios/availability/monitor/monitor_general.py
yardstick/benchmark/scenarios/availability/monitor/monitor_process.py
yardstick/benchmark/scenarios/availability/operation/baseoperation.py
yardstick/benchmark/scenarios/availability/operation/operation_general.py
yardstick/benchmark/scenarios/availability/result_checker/baseresultchecker.py
yardstick/benchmark/scenarios/availability/result_checker/result_checker_general.py
yardstick/benchmark/scenarios/availability/scenario_general.py
yardstick/benchmark/scenarios/availability/serviceha.py
yardstick/benchmark/scenarios/compute/cachestat.py
yardstick/benchmark/scenarios/compute/cpuload.py
yardstick/benchmark/scenarios/compute/cyclictest.py
yardstick/benchmark/scenarios/compute/memload.py
yardstick/benchmark/scenarios/networking/netutilization.py
yardstick/benchmark/scenarios/storage/storperf.py
yardstick/dispatcher/http.py
yardstick/dispatcher/influxdb.py
yardstick/ssh.py

index b1dc212..fda0ffd 100644 (file)
@@ -7,7 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import uuid
-import json
 import os
 import logging
 
@@ -22,12 +21,7 @@ def runTestCase(args):
         opts = args.get('opts', {})
         testcase = args['testcase']
     except KeyError:
-        logger.error('Lack of testcase argument')
-        result = {
-            'status': 'error',
-            'message': 'need testcase name'
-        }
-        return json.dumps(result)
+        return common_utils.error_handler('Lack of testcase argument')
 
     testcase = os.path.join(conf.TEST_CASE_PATH,
                             conf.TEST_CASE_PRE + testcase + '.yaml')
@@ -41,8 +35,4 @@ def runTestCase(args):
     logger.debug('Start to execute command list')
     common_utils.exec_command_task(command_list, task_id)
 
-    result = {
-        'status': 'success',
-        'task_id': task_id
-    }
-    return json.dumps(result)
+    return common_utils.result_handler('success', task_id)
index eaaf8b6..323e5cb 100644 (file)
@@ -11,7 +11,7 @@ from api.utils.common import Url
 
 
 urlpatterns = [
-    Url('/yardstick/test/action', views.Test, 'test'),
-    Url('/yardstick/result/action', views.Result, 'result'),
+    Url('/yardstick/testcases/release/action', views.Release, 'release'),
+    Url('/yardstick/results', views.Results, 'results'),
     Url('/yardstick/env/action', views.Env, 'env')
 ]
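
With the routes renamed, a client now posts to /yardstick/testcases/release/action and queries /yardstick/results. A minimal sketch of such calls; the host, port, action and testcase names below are placeholders and not defined by this change:

    import requests

    API = 'http://localhost:8888'   # host and port are assumptions

    # kick off a release test case through the renamed action endpoint
    resp = requests.post(API + '/yardstick/testcases/release/action',
                         json={'action': 'runTestCase',
                               'args': {'testcase': 'tc002', 'opts': {}}})
    task_id = resp.json().get('result')

    # query its outcome through the renamed results endpoint
    requests.get(API + '/yardstick/results',
                 params={'action': 'getResult', 'task_id': task_id})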
index 09cfc04..e3e64a7 100644 (file)
@@ -8,7 +8,8 @@
 ##############################################################################
 import collections
 import logging
-import json
+
+from flask import jsonify
 
 from api.utils.daemonthread import DaemonThread
 from yardstick.cmd.cli import YardstickCLI
@@ -50,7 +51,7 @@ def error_handler(message):
         'status': 'error',
         'message': message
     }
-    return json.dumps(result)
+    return jsonify(result)
 
 
 def result_handler(status, data):
@@ -58,7 +59,7 @@ def result_handler(status, data):
         'status': status,
         'result': data
     }
-    return json.dumps(result)
+    return jsonify(result)
 
 
 class Url(object):
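
Switching from json.dumps to flask.jsonify means error_handler and result_handler now return a flask.Response with an application/json content type instead of a plain string, and jsonify needs an active Flask application/request context; that is presumably why the ErrorHandlerTestCase and ResultHandlerTestCase removals appear further down in tests/unit/api/utils/test_common.py. A hedged sketch of how such a handler could still be exercised in a test, using a throwaway Flask app purely to provide the context:

    import json
    from flask import Flask
    from api.utils import common

    app = Flask(__name__)   # stand-in app, only needed to supply a request context

    with app.test_request_context():
        resp = common.error_handler('Lack of testcase argument')
        assert json.loads(resp.data) == {'status': 'error',
                                         'message': 'Lack of testcase argument'}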
index 7357625..065de67 100644 (file)
@@ -19,7 +19,7 @@ from api.actions import env as env_action
 logger = logging.getLogger(__name__)
 
 
-class Test(Resource):
+class Release(Resource):
     def post(self):
         action = common_utils.translate_to_str(request.json.get('action', ''))
         args = common_utils.translate_to_str(request.json.get('args', {}))
@@ -31,7 +31,7 @@ class Test(Resource):
             return common_utils.error_handler('Wrong action')
 
 
-class Result(Resource):
+class Results(Resource):
     def get(self):
         args = common_utils.translate_to_str(request.args)
         action = args.get('action', '')
diff --git a/tests/unit/api/actions/test_env.py b/tests/unit/api/actions/test_env.py
deleted file mode 100644 (file)
index e674d73..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from api.actions import env
-
-
-class CreateInfluxDBContainerTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env._create_influxdb_container')
-    def test_create_influxdb_container(self, mock_create_container):
-        env.createInfluxDBContainer({})
-        mock_create_container.assert_called_with()
-
-
-class CreateInfluxdbContainerTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env.Client')
-    def test_create_influxdb_container(self, mock_influx_client):
-        env._create_influxdb_container()
-        self.assertFalse(mock_influx_client()._create_container.called)
-
-
-class ConfigInfluxdbTestCase(unittest.TestCase):
-
-    @mock.patch('api.actions.env.influx.get_data_db_client')
-    def test_config_influxdb(self, mock_influx_client):
-        env._config_influxdb()
-        mock_influx_client.assert_called_with()
-
-
-class ConfigOutputFile(unittest.TestCase):
-
-    def test_config_output_file(self):
-        pass
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/actions/test_result.py b/tests/unit/api/actions/test_result.py
deleted file mode 100644 (file)
index 1686319..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import json
-
-from api.actions import result
-
-
-class GetResultTestCase(unittest.TestCase):
-
-    def test_getResult_with_no_taskid_arg(self):
-        args = {}
-        output = json.loads(result.getResult(args))
-
-        self.assertEqual('error', output['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/actions/test_test.py b/tests/unit/api/actions/test_test.py
deleted file mode 100644 (file)
index 7ebe9fc..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import json
-
-from api.actions import test
-
-
-class RunTestCase(unittest.TestCase):
-
-    def test_runTestCase_with_no_testcase_arg(self):
-        args = {}
-        output = json.loads(test.runTestCase(args))
-
-        self.assertEqual('error', output['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/unit/api/test_views.py b/tests/unit/api/test_views.py
deleted file mode 100644 (file)
index b835567..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-import json
-
-from api.views import Test
-from api.views import Result
-from api.views import Env
-
-
-class TestTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_post(self, mock_request):
-        mock_request.json.get.side_effect = ['hello', {}]
-
-        result = json.loads(Test().post())
-
-        self.assertEqual('error', result['status'])
-
-
-class ResultTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_get(self, mock_request):
-        mock_request.args.get.return_value = 'hello'
-
-        print Result().get()
-        result = json.loads(Result().get())
-
-        self.assertEqual('error', result['status'])
-
-
-class EnvTestCase(unittest.TestCase):
-
-    @mock.patch('api.views.request')
-    def test_post(self, mock_request):
-        mock_request.json.get.side_effect = ['hello', {}]
-
-        result = json.loads(Env().post())
-
-        self.assertEqual('error', result['status'])
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
index 9e050c7..5d17740 100644 (file)
@@ -7,7 +7,6 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 import unittest
-import json
 
 from api.utils import common
 
@@ -58,35 +57,6 @@ class GetCommandListTestCase(unittest.TestCase):
         self.assertEqual(result_list, output_list)
 
 
-class ErrorHandlerTestCase(unittest.TestCase):
-
-    def test_error_handler(self):
-        message = 'hello world'
-        output_dict = json.loads(common.error_handler(message))
-
-        result = {
-            'status': 'error',
-            'message': message
-        }
-
-        self.assertEqual(result, output_dict)
-
-
-class ResultHandlerTestCase(unittest.TestCase):
-
-    def test_result_handler(self):
-        status = 1
-        data = ['hello world']
-        output_dict = json.loads(common.result_handler(status, data))
-
-        result = {
-            'status': status,
-            'result': data
-        }
-
-        self.assertEqual(result, output_dict)
-
-
 def main():
     unittest.main()
 
diff --git a/tests/unit/api/utils/test_daemonthread.py b/tests/unit/api/utils/test_daemonthread.py
deleted file mode 100644 (file)
index f07f0fe..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import unittest
-import mock
-
-from api.utils.daemonthread import DaemonThread
-
-
-class DaemonThreadTestCase(unittest.TestCase):
-
-    @mock.patch('api.utils.daemonthread.os')
-    def test_run(self, mock_os):
-        def func(common_list, task_id):
-            return task_id
-
-        common_list = []
-        task_id = '1234'
-        thread = DaemonThread(func, (common_list, task_id))
-        thread.run()
-
-        mock_os.path.exist.return_value = True
-        pre_path = '../tests/opnfv/test_suites/'
-        mock_os.remove.assert_called_with(pre_path + '1234.yaml')
-
-
-def main():
-    unittest.main()
-
-
-if __name__ == '__main__':
-    main()
index a270524..1e021a0 100644 (file)
@@ -18,6 +18,8 @@
 
 import os
 import unittest
+from cStringIO import StringIO
+
 import mock
 
 from yardstick import ssh
@@ -274,6 +276,23 @@ class SSHRunTestCase(unittest.TestCase):
         send_calls = [call("line1"), call("line2"), call("e2")]
         self.assertEqual(send_calls, self.fake_session.send.mock_calls)
 
+    @mock.patch("yardstick.ssh.select")
+    def test_run_stdin_keep_open(self, mock_select):
+        """Test run method with stdin.
+
+        Third send call was called with "e2" because only 3 bytes was sent
+        by second call. So remainig 2 bytes of "line2" was sent by third call.
+        """
+        mock_select.select.return_value = ([], [], [])
+        self.fake_session.exit_status_ready.side_effect = [0, 0, 0, True]
+        self.fake_session.send_ready.return_value = True
+        self.fake_session.send.side_effect = len
+        fake_stdin = StringIO("line1\nline2\n")
+        self.test_client.run("cmd", stdin=fake_stdin, keep_stdin_open=True)
+        call = mock.call
+        send_calls = [call("line1\nline2\n")]
+        self.assertEqual(send_calls, self.fake_session.send.mock_calls)
+
     @mock.patch("yardstick.ssh.select")
     def test_run_select_error(self, mock_select):
         self.fake_session.exit_status_ready.return_value = False
index 67db442..78bce82 100644 (file)
@@ -83,7 +83,7 @@ class NodeContext(Context):
             return None
         elif len(nodes) > 1:
             LOG.error("Duplicate nodes!!!")
-            LOG.error("Nodes: %r" % nodes)
+            LOG.error("Nodes: %r", nodes)
             sys.exit(-1)
 
         # A clone is created in order to avoid affecting the
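
This and the following hunks switch log calls from eager '%' string interpolation to the logging module's lazy argument passing: the message is only rendered if the record is actually emitted, so the formatting cost is skipped when the level is filtered out. In short:

    import logging
    LOG = logging.getLogger(__name__)

    nodes = [{'name': 'node1'}]
    LOG.debug("Nodes: %r" % nodes)   # eager: string built even when DEBUG is filtered out
    LOG.debug("Nodes: %r", nodes)    # lazy: args stored on the LogRecord, rendered on emit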
index 74a236f..69ea915 100755 (executable)
@@ -93,7 +93,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
         if aborted.is_set():
             break
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         for i, value in enumerate(comb_values):
@@ -109,7 +109,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -129,7 +129,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index 2374992..8f3f75f 100755 (executable)
@@ -63,7 +63,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        log.error("exec command '%s' error:\n " % command)
+        log.error("exec command '%s' error:\n ", command)
         log.error(traceback.format_exc())
 
     return exitcode, output
@@ -76,10 +76,10 @@ def _single_action(seconds, command, queue):
     log.debug("single action: executing command: '%s'", command)
     ret_code, data = _execute_shell_command(command)
     if ret_code < 0:
-        log.error("single action error! command:%s" % command)
+        log.error("single action error! command:%s", command)
         queue.put({'single-action-data': data})
         return
-    log.debug("single action data: \n%s" % data)
+    log.debug("single action data: \n%s", data)
     queue.put({'single-action-data': data})
 
 
@@ -96,7 +96,7 @@ def _periodic_action(interval, command, queue):
             log.error("periodic action error! command:%s", command)
             queue.put({'periodic-action-data': data})
             break
-        log.debug("periodic action data: \n%s" % data)
+        log.debug("periodic action data: \n%s", data)
         queue.put({'periodic-action-data': data})
 
 
@@ -127,7 +127,7 @@ class Runner(object):
         """
         # if there is no runner, start the output serializer subprocess
         if len(Runner.runners) == 0:
-            log.debug("Starting dump process file '%s'" %
+            log.debug("Starting dump process file '%s'",
                       config["output_filename"])
             Runner.queue = multiprocessing.Queue()
             Runner.dump_process = multiprocessing.Process(
@@ -196,13 +196,13 @@ class Runner(object):
         '''run a potentially configured post-stop action'''
         if "post-stop-action" in self.config:
             command = self.config["post-stop-action"]["command"]
-            log.debug("post stop action: command: '%s'" % command)
+            log.debug("post stop action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("post action error! command:%s", command)
                 self.result_queue.put({'post-stop-action-data': data})
                 return
-            log.debug("post-stop data: \n%s" % data)
+            log.debug("post-stop data: \n%s", data)
             self.result_queue.put({'post-stop-action-data': data})
 
     def run(self, scenario_cfg, context_cfg):
@@ -219,13 +219,13 @@ class Runner(object):
         # run a potentially configured pre-start action
         if "pre-start-action" in self.config:
             command = self.config["pre-start-action"]["command"]
-            log.debug("pre start action: command: '%s'" % command)
+            log.debug("pre start action: command: '%s'", command)
             ret_code, data = _execute_shell_command(command)
             if ret_code < 0:
                 log.error("pre-start action error! command:%s", command)
                 self.result_queue.put({'pre-start-action-data': data})
                 return
-            log.debug("pre-start data: \n%s" % data)
+            log.debug("pre-start data: \n%s", data)
             self.result_queue.put({'pre-start-action-data': data})
 
         if "single-shot-action" in self.config:
index 1f51f51..1412c0c 100644 (file)
@@ -58,7 +58,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     start = time.time()
     while True:
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -71,7 +71,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -91,7 +91,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index b23b32b..3a839b6 100644 (file)
@@ -60,7 +60,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     if "run" in run_step:
         while True:
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
@@ -74,7 +74,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
                 if sla_action == "assert":
                     raise
                 elif sla_action == "monitor":
-                    LOG.warning("SLA validation failed: %s" % assertion.args)
+                    LOG.warning("SLA validation failed: %s", assertion.args)
                     errors = assertion.args
             except Exception as e:
                 errors = traceback.format_exc()
@@ -94,7 +94,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
             queue.put(record)
 
-            LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+            LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                       {"runner": runner_cfg["runner_id"],
                        "sequence": sequence})
 
index fe53412..3b06e2a 100644 (file)
@@ -67,7 +67,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
     for value in sequence_values:
         options[arg_name] = value
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s START" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s START",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         data = {}
@@ -80,7 +80,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
             if sla_action == "assert":
                 raise
             elif sla_action == "monitor":
-                LOG.warning("SLA validation failed: %s" % assertion.args)
+                LOG.warning("SLA validation failed: %s", assertion.args)
                 errors = assertion.args
         except Exception as e:
             errors = traceback.format_exc()
@@ -100,7 +100,7 @@ def _worker_process(queue, cls, method_name, scenario_cfg,
 
         queue.put(record)
 
-        LOG.debug("runner=%(runner)s seq=%(sequence)s END" %
+        LOG.debug("runner=%(runner)s seq=%(sequence)s END",
                   {"runner": runner_cfg["runner_id"], "sequence": sequence})
 
         sequence += 1
index 4b732a1..38f57d4 100644 (file)
@@ -28,8 +28,8 @@ class AttackerRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m recovering attacker %s \033[0m"
-            % (self.underlyingAttacker.key))
+            "\033[93m recovering attacker %s \033[0m",
+            self.underlyingAttacker.key)
         self.underlyingAttacker.recover()
 
 
@@ -40,6 +40,6 @@ class OperationRollbacker(ActionRollbacker):
 
     def rollback(self):
         LOG.debug(
-            "\033[93m rollback operation %s \033[0m"
-            % (self.underlyingOperation.key))
+            "\033[93m rollback operation %s \033[0m",
+            self.underlyingOperation.key)
         self.underlyingOperation.rollback()
index 6561f6b..3b1f8ef 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command, stdin=None):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -34,7 +34,7 @@ class BaremetalAttacker(BaseAttacker):
     __attacker_type__ = 'bare-metal-down'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -65,10 +65,10 @@ class BaremetalAttacker(BaseAttacker):
             "/bin/sh -s {0} -W 10".format(self.host_ip),
             stdin=open(self.check_script, "r"))
 
-        LOG.debug("check ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("check ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not stdout or "running" not in stdout:
-            LOG.info("the host (ipmi_ip:%s) is not running!" % self.ipmi_ip)
+            LOG.info("the host (ipmi_ip:%s) is not running!", self.ipmi_ip)
             return False
 
         return True
@@ -76,8 +76,8 @@ class BaremetalAttacker(BaseAttacker):
     def inject_fault(self):
         exit_status, stdout, stderr = self.connection.execute(
             "shutdown -h now")
-        LOG.debug("inject fault ret: %s out:%s err:%s" %
-                  (exit_status, stdout, stderr))
+        LOG.debug("inject fault ret: %s out:%s err:%s",
+                  exit_status, stdout, stderr)
         if not exit_status:
             LOG.info("inject fault success")
 
@@ -91,7 +91,7 @@ class BaremetalAttacker(BaseAttacker):
             ssh_port = host.get("ssh_port", ssh.DEFAULT_PORT)
             pwd = host.get("pwd", None)
 
-            LOG.debug("jump_host ip:%s user:%s" % (ip, user))
+            LOG.debug("jump_host ip:%s user:%s", ip, user)
             self.jump_connection = ssh.SSH(user, ip, password=pwd,
                                            port=ssh_port)
             self.jump_connection.wait(timeout=600)
index 5e7716e..a452c37 100644 (file)
@@ -20,7 +20,7 @@ class GeneralAttacker(BaseAttacker):
     __attacker_type__ = 'general-attacker'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -79,8 +79,8 @@ class GeneralAttacker(BaseAttacker):
                       .format(stdout))
         else:
             LOG.error(
-                "the inject_fault's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the inject_fault's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def recover(self):
         if "rollback_parameter" in self._config:
index 0a844f5..2ccc231 100644 (file)
@@ -19,7 +19,7 @@ class ProcessAttacker(BaseAttacker):
     __attacker_type__ = 'kill-process'
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -54,8 +54,8 @@ class ProcessAttacker(BaseAttacker):
             return True
         else:
             LOG.error(
-                "the host envrioment is error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the host envrioment is error, stdout:%s, stderr:%s",
+                stdout, stderr)
         return False
 
     def inject_fault(self):
index 78276ef..f96e577 100644 (file)
@@ -26,7 +26,7 @@ class AttackerMgr(object):
         self._attacker_list = []
 
     def init_attackers(self, attacker_cfgs, context):
-        LOG.debug("attackerMgr confg: %s" % attacker_cfgs)
+        LOG.debug("attackerMgr confg: %s", attacker_cfgs)
 
         for cfg in attacker_cfgs:
             attacker_cls = BaseAttacker.get_attacker_cls(cfg)
index 267933d..104c683 100644 (file)
@@ -63,7 +63,7 @@ class Director(object):
 
     def createActionPlayer(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionplayers.AttackerPlayer(self.attackerMgr[key])
         if type == ActionType.MONITOR:
@@ -77,13 +77,13 @@ class Director(object):
 
     def createActionRollbacker(self, type, key):
         LOG.debug(
-            "the type of current action is %s, the key is %s" % (type, key))
+            "the type of current action is %s, the key is %s", type, key)
         if type == ActionType.ATTACKER:
             return actionrollbackers.AttackerRollbacker(self.attackerMgr[key])
         if type == ActionType.OPERATION:
             return actionrollbackers.OperationRollbacker(
                 self.operationMgr[key])
-        LOG.debug("no rollbacker created for %s" % (key))
+        LOG.debug("no rollbacker created for %s", key)
 
     def verify(self):
         result = True
index d26c99c..38d1c4e 100644 (file)
@@ -27,7 +27,7 @@ class MonitorMgr(object):
         self._monitor_list = []
 
     def init_monitors(self, monitor_cfgs, context):
-        LOG.debug("monitorMgr config: %s" % monitor_cfgs)
+        LOG.debug("monitorMgr config: %s", monitor_cfgs)
 
         for monitor_cfg in monitor_cfgs:
             monitor_type = monitor_cfg["monitor_type"]
@@ -87,7 +87,7 @@ class BaseMonitor(multiprocessing.Process):
         return os.path.join(base_path, path)
 
     def run(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
 
         self.setup()
         monitor_time = self._config.get("monitor_time", 0)
@@ -140,7 +140,7 @@ class BaseMonitor(multiprocessing.Process):
     def wait_monitor(self):
         self.join()
         self._result = self._queue.get()
-        LOG.debug("the monitor result:%s" % self._result)
+        LOG.debug("the monitor result:%s", self._result)
 
     def setup(self):  # pragma: no cover
         pass
index b55cc31..366d16e 100644 (file)
@@ -24,7 +24,7 @@ def _execute_shell_command(command):
     except Exception:
         exitcode = -1
         output = traceback.format_exc()
-        LOG.error("exec command '%s' error:\n " % command)
+        LOG.error("exec command '%s' error:\n ", command)
         LOG.error(traceback.format_exc())
 
     return exitcode, output
@@ -62,8 +62,8 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
                 "/bin/bash -s '{0}'".format(self.cmd),
                 stdin=open(self.check_script, "r"))
 
-            LOG.debug("the ret stats: %s stdout: %s stderr: %s" %
-                      (exit_status, stdout, stderr))
+            LOG.debug("the ret stats: %s stdout: %s stderr: %s",
+                      exit_status, stdout, stderr)
         else:
             exit_status, stdout = _execute_shell_command(self.cmd)
         if exit_status:
@@ -72,10 +72,10 @@ class MonitorOpenstackCmd(basemonitor.BaseMonitor):
 
     def verify_SLA(self):
         outage_time = self._result.get('outage_time', None)
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time > max_outage_time:
-            LOG.info("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.info("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             LOG.info("the sla is passed")
index f9ddb25..359cde6 100644 (file)
@@ -61,14 +61,14 @@ class GeneralMonitor(basemonitor.BaseMonitor):
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_outage_time"]
         if outage_time is None:
             LOG.error("There is no outage_time in monitor result.")
             return False
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
index 403ec4d..a88b8d4 100644 (file)
@@ -39,17 +39,17 @@ class MonitorProcess(basemonitor.BaseMonitor):
             "/bin/sh -s {0}".format(self.process_name),
             stdin=open(self.check_script, "r"))
         if not stdout or int(stdout) <= 0:
-            LOG.info("the process (%s) is not running!" % self.process_name)
+            LOG.info("the process (%s) is not running!", self.process_name)
             return False
 
         return True
 
     def verify_SLA(self):
-        LOG.debug("the _result:%s" % self._result)
+        LOG.debug("the _result:%s", self._result)
         outage_time = self._result.get('outage_time', None)
         max_outage_time = self._config["sla"]["max_recover_time"]
         if outage_time > max_outage_time:
-            LOG.error("SLA failure: %f > %f" % (outage_time, max_outage_time))
+            LOG.error("SLA failure: %f > %f", outage_time, max_outage_time)
             return False
         else:
             return True
index e776e87..80efd1b 100644 (file)
@@ -26,7 +26,7 @@ class OperationMgr(object):
         self._operation_list = []
 
     def init_operations(self, operation_cfgs, context):
-        LOG.debug("operationMgr confg: %s" % operation_cfgs)
+        LOG.debug("operationMgr confg: %s", operation_cfgs)
         for cfg in operation_cfgs:
             operation_type = cfg['operation_type']
             operation_cls = BaseOperation.get_operation_cls(operation_type)
index aa28472..b3a20c3 100644 (file)
@@ -19,7 +19,7 @@ class GeneralOperaion(BaseOperation):
     __operation__type__ = "general-operation"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,8 +67,8 @@ class GeneralOperaion(BaseOperation):
             LOG.debug("success,the operation's output is: {0}".format(stdout))
         else:
             LOG.error(
-                "the operation's error, stdout:%s, stderr:%s" %
-                (stdout, stderr))
+                "the operation's error, stdout:%s, stderr:%s",
+                stdout, stderr)
 
     def rollback(self):
         if "rollback_parameter" in self._config:
index 1bdb9f2..a24f26e 100644 (file)
@@ -26,7 +26,7 @@ class ResultCheckerMgr(object):
         self._result_checker_list = []
 
     def init_ResultChecker(self, resultchecker_cfgs, context):
-        LOG.debug("resultcheckerMgr confg: %s" % resultchecker_cfgs)
+        LOG.debug("resultcheckerMgr confg: %s", resultchecker_cfgs)
 
         for cfg in resultchecker_cfgs:
             resultchecker_type = cfg['checker_type']
index ae896c2..8c9d160 100644 (file)
@@ -20,7 +20,7 @@ class GeneralResultChecker(BaseResultChecker):
     __result_checker__type__ = "general-result-checker"
 
     def setup(self):
-        LOG.debug("config:%s context:%s" % (self._config, self._context))
+        LOG.debug("config:%s context:%s", self._config, self._context)
         host = self._context.get(self._config['host'], None)
         ip = host.get("ip", None)
         user = host.get("user", "root")
@@ -67,7 +67,7 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.debug("action script of the operation is: {0}"
                       .format(self.verify_script))
 
-        LOG.debug("exit_status ,stdout : {0} ,{1}".format(exit_status, stdout))
+        LOG.debug("exit_status ,stdout : %s ,%s", exit_status, stdout)
         if exit_status == 0 and stdout:
             self.actualResult = stdout
             LOG.debug("verifying resultchecker: {0}".format(self.key))
@@ -104,6 +104,6 @@ class GeneralResultChecker(BaseResultChecker):
             LOG.error(stderr)
 
         LOG.debug(
-            "verifying resultchecker: {0},the result is : {1}"
-            .format(self.key, self.success))
+            "verifying resultchecker: %s,the result is : %s", self.key,
+            self.success)
         return self.success
index 0a128aa..b064c67 100644 (file)
@@ -22,7 +22,7 @@ class ScenarioGeneral(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" % (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s", scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
 
index 10f2c4f..46a197c 100755 (executable)
@@ -21,8 +21,8 @@ class ServiceHA(base.Scenario):
 
     def __init__(self, scenario_cfg, context_cfg):
         LOG.debug(
-            "scenario_cfg:%s context_cfg:%s" %
-            (scenario_cfg, context_cfg))
+            "scenario_cfg:%s context_cfg:%s",
+            scenario_cfg, context_cfg)
         self.scenario_cfg = scenario_cfg
         self.context_cfg = context_cfg
         self.setup_done = False
index 1177020..25300dd 100644 (file)
@@ -92,7 +92,7 @@ class CACHEstat(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index a7fae44..9d71038 100644 (file)
@@ -96,7 +96,7 @@ class CPULoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status != 0:
             raise RuntimeError("Failed executing command: ",
index 6a1afe2..a6c4d95 100644 (file)
@@ -69,14 +69,14 @@ class Cyclictest(base.Scenario):
         rpm_dir = setup_options["rpm_dir"]
         script_dir = setup_options["script_dir"]
         image_dir = setup_options["image_dir"]
-        LOG.debug("Send RPMs from %s to workspace %s" %
-                  (rpm_dir, self.WORKSPACE))
+        LOG.debug("Send RPMs from %s to workspace %s",
+                  rpm_dir, self.WORKSPACE)
         client.put(rpm_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send scripts from %s to workspace %s" %
-                  (script_dir, self.WORKSPACE))
+        LOG.debug("Send scripts from %s to workspace %s",
+                  script_dir, self.WORKSPACE)
         client.put(script_dir, self.WORKSPACE, recursive=True)
-        LOG.debug("Send guest image from %s to workspace %s" %
-                  (image_dir, self.WORKSPACE))
+        LOG.debug("Send guest image from %s to workspace %s",
+                  image_dir, self.WORKSPACE)
         client.put(image_dir, self.WORKSPACE, recursive=True)
 
     def _connect_host(self):
@@ -102,7 +102,7 @@ class Cyclictest(base.Scenario):
         self.guest.wait(timeout=600)
 
     def _run_setup_cmd(self, client, cmd):
-        LOG.debug("Run cmd: %s" % cmd)
+        LOG.debug("Run cmd: %s", cmd)
         status, stdout, stderr = client.execute(cmd)
         if status:
             if re.search(self.REBOOT_CMD_PATTERN, cmd):
index 48088f8..e1ba93d 100644 (file)
@@ -61,7 +61,7 @@ class MEMLoad(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on server."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index ecde756..1ea92cc 100644 (file)
@@ -83,7 +83,7 @@ class NetUtilization(base.Scenario):
 
     def _execute_command(self, cmd):
         """Execute a command on target."""
-        LOG.info("Executing: %s" % cmd)
+        LOG.info("Executing: %s", cmd)
         status, stdout, stderr = self.client.execute(cmd)
         if status:
             raise RuntimeError("Failed executing command: ",
index d39c23a..06c329d 100644 (file)
@@ -75,8 +75,8 @@ class StorPerf(base.Scenario):
         setup_query_content = json.loads(setup_query.content)
         if setup_query_content["stack_created"]:
             self.setup_done = True
-            LOG.debug("stack_created: %s"
-                      setup_query_content["stack_created"])
+            LOG.debug("stack_created: %s",
+                      setup_query_content["stack_created"])
 
     def setup(self):
         """Set the configuration."""
@@ -88,8 +88,8 @@ class StorPerf(base.Scenario):
             if env_argument in self.options:
                 env_args[env_argument] = self.options[env_argument]
 
-        LOG.info("Creating a stack on node %s with parameters %s" %
-                 (self.target, env_args))
+        LOG.info("Creating a stack on node %s with parameters %s",
+                 self.target, env_args)
         setup_res = requests.post('http://%s:5000/api/v1.0/configurations'
                                   % self.target, json=env_args)
 
@@ -99,7 +99,7 @@ class StorPerf(base.Scenario):
             raise RuntimeError("Failed to create a stack, error message:",
                                setup_res_content["message"])
         elif setup_res.status_code == 200:
-            LOG.info("stack_id: %s" % setup_res_content["stack_id"])
+            LOG.info("stack_id: %s", setup_res_content["stack_id"])
 
             while not self.setup_done:
                 self._query_setup_state()
@@ -148,7 +148,7 @@ class StorPerf(base.Scenario):
             if job_argument in self.options:
                 job_args[job_argument] = self.options[job_argument]
 
-        LOG.info("Starting a job with parameters %s" % job_args)
+        LOG.info("Starting a job with parameters %s", job_args)
         job_res = requests.post('http://%s:5000/api/v1.0/jobs' % self.target,
                                 json=job_args)
 
@@ -159,7 +159,7 @@ class StorPerf(base.Scenario):
                                job_res_content["message"])
         elif job_res.status_code == 200:
             job_id = job_res_content["job_id"]
-            LOG.info("Started job id: %s..." % job_id)
+            LOG.info("Started job id: %s...", job_id)
 
             time.sleep(self.timeout)
             terminate_res = requests.delete('http://%s:5000/api/v1.0/jobs' %
index 2298d00..98e772d 100644 (file)
@@ -81,14 +81,14 @@ class HttpDispatcher(DispatchBase):
                 case_name = v["scenario_cfg"]["tc"]
                 break
         if case_name == "":
-            LOG.error('Test result : %s' % json.dumps(self.result))
+            LOG.error('Test result : %s', json.dumps(self.result))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return
 
         self.result["case_name"] = case_name
 
         try:
-            LOG.debug('Test result : %s' % json.dumps(self.result))
+            LOG.debug('Test result : %s', json.dumps(self.result))
             res = requests.post(self.target,
                                 data=json.dumps(self.result),
                                 headers=self.headers,
index 8673253..fc9f3e9 100644 (file)
@@ -127,7 +127,7 @@ class InfluxdbDispatcher(DispatchBase):
         return make_lines(msg).encode('utf-8')
 
     def record_result_data(self, data):
-        LOG.debug('Test result : %s' % json.dumps(data))
+        LOG.debug('Test result : %s', json.dumps(data))
         self.raw_result.append(data)
         if self.target == '':
             # if the target was not set, do not do anything
@@ -148,13 +148,13 @@ class InfluxdbDispatcher(DispatchBase):
             return 0
 
         if self.tc == "":
-            LOG.error('Test result : %s' % json.dumps(data))
+            LOG.error('Test result : %s', json.dumps(data))
             LOG.error('The case_name cannot be found, no data will be posted.')
             return -1
 
         try:
             line = self._data_to_line_protocol(data)
-            LOG.debug('Test result line format : %s' % line)
+            LOG.debug('Test result line format : %s', line)
             res = requests.post(self.influxdb_url,
                                 data=line,
                                 auth=(self.username, self.password),
@@ -171,5 +171,5 @@ class InfluxdbDispatcher(DispatchBase):
         return 0
 
     def flush_result_data(self):
-        LOG.debug('Test result all : %s' % json.dumps(self.raw_result))
+        LOG.debug('Test result all : %s', json.dumps(self.raw_result))
         return 0
index d287b4d..8485dcc 100644 (file)
@@ -151,10 +151,12 @@ class SSH(object):
         self._client = False
 
     def run(self, cmd, stdin=None, stdout=None, stderr=None,
-            raise_on_error=True, timeout=3600):
+            raise_on_error=True, timeout=3600,
+            keep_stdin_open=False):
         """Execute specified command on the server.
 
         :param cmd:             Command to be executed.
+        :type cmd:              str
         :param stdin:           Open file or string to pass to stdin.
         :param stdout:          Open file to connect to stdout.
         :param stderr:          Open file to connect to stderr.
@@ -162,6 +164,8 @@ class SSH(object):
                                 then exception will be raized if non-zero code.
         :param timeout:         Timeout in seconds for command execution.
                                 Default 1 hour. No timeout if set to 0.
+        :param keep_stdin_open: don't close stdin on empty reads
+        :type keep_stdin_open:  bool
         """
 
         client = self._get_client()
@@ -171,10 +175,12 @@ class SSH(object):
 
         return self._run(client, cmd, stdin=stdin, stdout=stdout,
                          stderr=stderr, raise_on_error=raise_on_error,
-                         timeout=timeout)
+                         timeout=timeout,
+                         keep_stdin_open=keep_stdin_open)
 
     def _run(self, client, cmd, stdin=None, stdout=None, stderr=None,
-             raise_on_error=True, timeout=3600):
+             raise_on_error=True, timeout=3600,
+             keep_stdin_open=False):
 
         transport = client.get_transport()
         session = transport.open_session()
@@ -197,14 +203,14 @@ class SSH(object):
 
             if session.recv_ready():
                 data = session.recv(4096)
-                self.log.debug("stdout: %r" % data)
+                self.log.debug("stdout: %r", data)
                 if stdout is not None:
                     stdout.write(data)
                 continue
 
             if session.recv_stderr_ready():
                 stderr_data = session.recv_stderr(4096)
-                self.log.debug("stderr: %r" % stderr_data)
+                self.log.debug("stderr: %r", stderr_data)
                 if stderr is not None:
                     stderr.write(stderr_data)
                 continue
@@ -214,13 +220,15 @@ class SSH(object):
                     if not data_to_send:
                         data_to_send = stdin.read(4096)
                         if not data_to_send:
-                            stdin.close()
-                            session.shutdown_write()
-                            writes = []
-                            continue
-                    sent_bytes = session.send(data_to_send)
-                    # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
-                    data_to_send = data_to_send[sent_bytes:]
+                            # we may need to keep stdin open
+                            if not keep_stdin_open:
+                                stdin.close()
+                                session.shutdown_write()
+                                writes = []
+                    if data_to_send:
+                        sent_bytes = session.send(data_to_send)
+                        # LOG.debug("sent: %s" % data_to_send[:sent_bytes])
+                        data_to_send = data_to_send[sent_bytes:]
 
             if session.exit_status_ready():
                 break
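
With keep_stdin_open=True, exhausting the local stdin buffer no longer closes the remote channel's write side, so a command that finishes on its own can be fed input without ever receiving EOF. A minimal usage sketch; the address, credentials and command are placeholders:

    from cStringIO import StringIO
    from yardstick import ssh

    client = ssh.SSH("root", "192.0.2.10", password="secret")   # placeholder target
    # 'head -1' exits after the first line, so run() returns even though
    # stdin is never closed on the remote side
    client.run("head -1", stdin=StringIO("line1\nline2\n"),
               keep_stdin_open=True, timeout=60)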
@@ -267,10 +275,10 @@ class SSH(object):
             try:
                 return self.execute("uname")
             except (socket.error, SSHError) as e:
-                self.log.debug("Ssh is still unavailable: %r" % e)
+                self.log.debug("Ssh is still unavailable: %r", e)
                 time.sleep(interval)
             if time.time() > (start_time + timeout):
-                raise SSHTimeout("Timeout waiting for '%s'" % self.host)
+                raise SSHTimeout("Timeout waiting for '%s'", self.host)
 
     def put(self, files, remote_path=b'.', recursive=False):
         client = self._get_client()