Return the Ansible playbook run results from driver.py and args_handler.py
author    zhifeng.jiang <jiang.zhifeng@zte.com.cn>
          Thu, 15 Sep 2016 10:18:14 +0000 (18:18 +0800)
committer zhifeng.jiang <jiang.zhifeng@zte.com.cn>
          Fri, 23 Sep 2016 14:31:22 +0000 (22:31 +0800)
JIRA: QTIP-99

Change-Id: I875ad52bc69a843e39deb92ad45da90c2e737aae
Signed-off-by: zhifeng.jiang <jiang.zhifeng@zte.com.cn>
func/args_handler.py
func/driver.py

diff --git a/func/args_handler.py b/func/args_handler.py
index 90d902b..50d803e 100644
--- a/func/args_handler.py
+++ b/func/args_handler.py
@@ -55,12 +55,13 @@ def prepare_ansible_env(benchmark_test_case):
 def run_benchmark(installer_type, pwd, benchmark, benchmark_details,
                   proxy_info, env_setup, benchmark_test_case):
     driver = Driver()
-    driver.drive_bench(installer_type, pwd, benchmark,
-                       env_setup.roles_dict.items(), _get_f_name(benchmark_test_case),
-                       benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
+    return driver.drive_bench(installer_type, pwd, benchmark,
+                              env_setup.roles_dict.items(),
+                              _get_f_name(benchmark_test_case),
+                              benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
 
 
 def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case):
     benchmark, benchmark_details, proxy_info, env_setup = prepare_ansible_env(benchmark_test_case)
-    run_benchmark(installer_type, pwd, benchmark, benchmark_details,
-                  proxy_info, env_setup, benchmark_test_case)
+    return run_benchmark(installer_type, pwd, benchmark, benchmark_details,
+                         proxy_info, env_setup, benchmark_test_case)
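For illustration only, not part of this patch: a minimal usage sketch of the value that prepare_and_run_benchmark() now propagates to its caller. The installer name, pwd and test-case path below are placeholders; the result/detail shape is the one built by the driver.py helpers further down.

    from func import args_handler

    installer_type = 'fuel'               # placeholder installer name
    pwd = '/home'                         # placeholder, whatever the CLI passes today
    test_case = 'path/to/test_case.yaml'  # placeholder benchmark test case file

    outcome = args_handler.prepare_and_run_benchmark(installer_type, pwd, test_case)

    # 'result' sums failures + unreachable across all roles, so 0 means
    # every playbook run finished cleanly.
    if outcome['result'] == 0:
        print('benchmark succeeded')
    else:
        print('benchmark failed: %s' % outcome['detail'])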
diff --git a/func/driver.py b/func/driver.py
index ff40a4c..88d673f 100644
--- a/func/driver.py
+++ b/func/driver.py
@@ -7,8 +7,10 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 from utils import logger_utils
+from operator import add
 from ansible_api import AnsibleApi
 
+
 logger = logger_utils.QtipLogger('driver').get
 
 
@@ -65,7 +67,7 @@ class Driver:
         ansible_api.execute_playbook('./data/hosts',
                                      './benchmarks/playbooks/{0}.yaml'.format(benchmark),
                                      './data/QtipKey', extra_vars)
-        return ansible_api.get_detail_playbook_stats()
+        return self.get_ansible_result(extra_vars['role'], ansible_api.get_detail_playbook_stats())
 
     def drive_bench(self, installer_type, pwd, benchmark, roles, benchmark_fname,
                     benchmark_detail=None, pip_dict=None, proxy_info=None):
@@ -73,8 +75,18 @@ class Driver:
         pip_dict = sorted(pip_dict)
         var_json = self.get_common_var_json(installer_type, pwd, benchmark_fname,
                                             benchmark_detail, pip_dict, proxy_info)
-        map(lambda role: self.run_ansible_playbook
-            (benchmark, self.merge_two_dicts(var_json,
-                                             self.get_special_var_json(role, roles,
-                                                                       benchmark_detail,
-                                                                       pip_dict))), roles)
+        result = map(lambda role: self.run_ansible_playbook
+                     (benchmark, self.merge_two_dicts(var_json,
+                                                      self.get_special_var_json(role, roles,
+                                                                                benchmark_detail,
+                                                                                pip_dict))), roles)
+        return reduce(self._merge_ansible_result, result)
+
+    def get_ansible_result(self, role, stats):
+        result = reduce(add, map(lambda x: x[1]['failures'] + x[1]['unreachable'], stats))
+        return {'result': result,
+                'detail': {role: stats}}
+
+    def _merge_ansible_result(self, result_1, result_2):
+        return {'result': result_1['result'] + result_2['result'],
+                'detail': self.merge_two_dicts(result_1['detail'], result_2['detail'])}
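For illustration only, not part of this patch: a rough sketch of how the two new helpers aggregate per-role stats, assuming get_detail_playbook_stats() returns a list of (host, stats-dict) pairs, as the x[1]['failures'] indexing above implies. Role names, host IPs and counts are made-up sample values.

    from func.driver import Driver

    driver = Driver()

    # Made-up per-role playbook stats in the assumed (host, stats) layout.
    compute_stats = [('10.20.0.4', {'ok': 12, 'changed': 5, 'skipped': 1,
                                    'failures': 0, 'unreachable': 0})]
    network_stats = [('10.20.0.5', {'ok': 10, 'changed': 4, 'skipped': 0,
                                    'failures': 1, 'unreachable': 0})]

    r1 = driver.get_ansible_result('compute', compute_stats)
    r2 = driver.get_ansible_result('network', network_stats)
    merged = driver._merge_ansible_result(r1, r2)

    # merged == {'result': 1,   # total failures + unreachable over both roles
    #            'detail': {'compute': compute_stats,
    #                       'network': network_stats}}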