[dovetail tool]check and get results for each cmd 49/26349/4
authorLeo Wang <grakiss.wanglei@huawei.com>
Wed, 21 Dec 2016 07:34:44 +0000 (02:34 -0500)
committerLeo wang <grakiss.wanglei@huawei.com>
Thu, 22 Dec 2016 02:14:26 +0000 (02:14 +0000)
JIRA: DOVETAIL-166

Check the results of each cmd executed in the test case
1. the results of pre_condition, post_condition, and cmds need to be checked,
   so a failure can be detected quickly, without needing to go through the next steps
2. it's more accurate to show where an error occurred as early as possible
3. get results from shell scripts

Change-Id: I5c1e59839c55b92de0e83e7e1eb552aa364b3f80
Signed-off-by: Leo Wang <grakiss.wanglei@huawei.com>
dovetail/conf/functest_config.yml
dovetail/container.py
dovetail/report.py
dovetail/run.py
dovetail/test_runner.py
dovetail/testcase.py
dovetail/testcase/example.tc002.yml
dovetail/testcase/example.tc003.yml [new file with mode: 0644]
dovetail/testcase/ipv6.tc001.yml
dovetail/utils/dovetail_utils.py

index ceb894b..6aea280 100644 (file)
@@ -6,12 +6,12 @@ functest:
          -e BUILD_TAG=dovetail -e CI_DEBUG=true -e DEPLOY_TYPE=baremetal'
   opts: '-id --privileged=true'
   pre_condition:
-    - 'echo test for precondition'
+    - 'echo test for precondition in functest'
   cmds:
     - 'functest env prepare'
     - 'functest testcase run {{validate_testcase}}'
   post_condition:
-    - 'echo test for postcondition'
+    - 'echo test for postcondition in functest'
   result:
     dir: '/home/opnfv/functest/results'
     store_type: 'file'
index 59fc0d8..b3450fa 100644 (file)
@@ -67,8 +67,10 @@ class Container:
                              (docker_image))
         else:
             cmd = 'sudo docker pull %s' % (docker_image)
-            dt_utils.exec_cmd(cmd, cls.logger)
-            cls.has_pull_latest_image[type] = True
+            ret, msg = dt_utils.exec_cmd(cmd, cls.logger)
+            if ret == 0:
+                cls.logger.debug('docker pull %s success!', docker_image)
+                cls.has_pull_latest_image[type] = True
 
     @classmethod
     def clean(cls, container_id):
@@ -80,6 +82,6 @@ class Container:
     @classmethod
     def exec_cmd(cls, container_id, sub_cmd, exit_on_error=False):
         if sub_cmd == "":
-            return
+            return (1, 'sub_cmd is empty')
         cmd = 'sudo docker exec %s /bin/bash -c "%s"' % (container_id, sub_cmd)
-        dt_utils.exec_cmd(cmd, cls.logger, exit_on_error)
+        return dt_utils.exec_cmd(cmd, cls.logger, exit_on_error)
index 654e6bf..8c302b6 100644 (file)
@@ -168,12 +168,13 @@ class Report:
         type = testcase.validate_type()
         crawler = CrawlerFactory.create(type)
         if crawler is None:
+            cls.logger.error('crawler is None:%s', testcase.name())
             return None
 
         if validate_testcase in cls.results[type]:
             return cls.results[type][validate_testcase]
 
-        result = crawler.crawl(validate_testcase)
+        result = crawler.crawl(testcase)
 
         if result is not None:
             cls.results[type][validate_testcase] = result
@@ -187,25 +188,13 @@ class Report:
         return result
 
 
-class CrawlerFactory:
-
-    @staticmethod
-    def create(type):
-        if type == 'functest':
-            return FunctestCrawler()
-
-        if type == 'yardstick':
-            return YardstickCrawler()
-
-        return None
-
-
 class FunctestCrawler:
 
     logger = None
 
     def __init__(self):
         self.type = 'functest'
+        self.logger.debug('create crawler:%s', self.type)
 
     @classmethod
     def create_log(cls):
@@ -258,7 +247,8 @@ class FunctestCrawler:
 
     def crawl_from_url(self, testcase=None):
         url = \
-            dt_cfg.dovetail_config[self.type]['result']['db_url'] % testcase
+            dt_cfg.dovetail_config[self.type]['result']['db_url'] % \
+            testcase.validate_testcase()
         self.logger.debug("Query to rest api: %s" % url)
         try:
             data = json.load(urllib2.urlopen(url))
@@ -275,6 +265,7 @@ class YardstickCrawler:
 
     def __init__(self):
         self.type = 'yardstick'
+        self.logger.debug('create crawler:%s', self.type)
 
     @classmethod
     def create_log(cls):
@@ -292,7 +283,7 @@ class YardstickCrawler:
 
     def crawl_from_file(self, testcase=None):
         file_path = os.path.join(dt_cfg.dovetail_config['result_dir'],
-                                 testcase + '.out')
+                                 testcase.validate_testcase() + '.out')
         if not os.path.exists(file_path):
             self.logger.info('result file not found: %s' % file_path)
             return None
@@ -312,17 +303,39 @@ class YardstickCrawler:
         return None
 
 
-class CheckerFactory:
+class ShellCrawler:
 
-    @staticmethod
-    def create(type):
-        if type == 'functest':
-            return FunctestChecker()
+    def __init__(self):
+        self.type = 'shell'
 
-        if type == 'yardstick':
-            return YardstickChecker()
+    def crawl(self, testcase=None):
+        return self.crawl_from_file(testcase)
 
-        return None
+    def crawl_from_file(self, testcase=None):
+        file_path = os.path.join(dt_cfg.dovetail_config['result_dir'],
+                                 testcase.name()) + '.out'
+        if not os.path.exists(file_path):
+            return None
+        try:
+            with open(file_path, 'r') as json_data:
+                result = json.load(json_data)
+            return result
+        except Exception:
+            return None
+
+
+class CrawlerFactory:
+
+    CRAWLER_MAP = {'functest': FunctestCrawler,
+                   'yardstick': YardstickCrawler,
+                   'shell': ShellCrawler}
+
+    @classmethod
+    def create(cls, type):
+        try:
+            return cls.CRAWLER_MAP[type]()
+        except KeyError:
+            return None
 
 
 class ResultChecker:
@@ -388,3 +401,27 @@ class YardstickChecker:
         else:
             testcase.passed(result['criteria'] == 'PASS')
         return
+
+
+class ShellChecker:
+
+    @staticmethod
+    def check(testcase, result):
+        try:
+            testcase.passed(result['pass'])
+        except Exception:
+            testcase.passed(False)
+
+
+class CheckerFactory:
+
+    CHECKER_MAP = {'functest': FunctestChecker,
+                   'yardstick': YardstickChecker,
+                   'shell': ShellChecker}
+
+    @classmethod
+    def create(cls, type):
+        try:
+            return cls.CHECKER_MAP[type]()
+        except KeyError:
+            return None
index 8a91e4b..85e6566 100755 (executable)
@@ -64,8 +64,8 @@ def run_test(testsuite, testarea, logger):
             end_time = time.time()
             duration = end_time - start_time
 
-        db_result = Report.get_result(testcase)
-        Report.check_result(testcase, db_result)
+        result = Report.get_result(testcase)
+        Report.check_result(testcase, result)
 
     return duration
 
index 8a95b1f..70dd234 100644 (file)
@@ -7,8 +7,11 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 
+import os
+import json
 import utils.dovetail_utils as dt_utils
 import utils.dovetail_logger as dt_logger
+from utils.dovetail_config import DovetailConfig as dt_cfg
 
 from container import Container
 
@@ -19,34 +22,48 @@ class DockerRunner(object):
 
     def __init__(self, testcase):
         self.testcase = testcase
+        self.logger.debug('create runner: %s', self.type)
 
     @classmethod
     def create_log(cls):
-        cls.logger = dt_logger.Logger(__file__).getLogger()
+        cls.logger = dt_logger.Logger(__name__ + '.DockerRunner').getLogger()
 
     def run(self):
         Container.pull_image(self.testcase.validate_type())
         container_id = Container.create(self.testcase.validate_type())
+        if container_id == '':
+            self.logger.error('failed to create container')
+            return
+
         self.logger.debug('container id:%s' % container_id)
 
         if not self.testcase.prepared():
+            failed = False
             cmds = self.testcase.pre_condition()
             if cmds:
                 for cmd in cmds:
-                    Container.exec_cmd(container_id, cmd)
-            self.testcase.prepared(True)
+                    ret, msg = Container.exec_cmd(container_id, cmd)
+                    if ret != 0:
+                        failed = True
+                        break
+            if not failed:
+                self.testcase.prepared(True)
 
         if not self.testcase.prepare_cmd():
             self.logger.error('failed to prepare testcase:%s',
                               self.testcase.name)
         else:
             for cmd in self.testcase.cmds:
-                Container.exec_cmd(container_id, cmd)
+                ret, msg = Container.exec_cmd(container_id, cmd)
+                if ret != 0:
+                    self.logger.error('Failed to exec %s, ret:%d, msg:%s',
+                                      cmd, ret, msg)
+                    break
 
         cmds = self.testcase.post_condition()
         if cmds:
             for cmd in cmds:
-                Container.exec_cmd(container_id, cmd)
+                ret, msg = Container.exec_cmd(container_id, cmd)
         self.testcase.cleaned(True)
 
         Container.clean(container_id)
@@ -55,15 +72,15 @@ class DockerRunner(object):
 class FunctestRunner(DockerRunner):
 
     def __init__(self, testcase):
+        self.type = 'functest'
         super(FunctestRunner, self).__init__(testcase)
-        self.name = 'functest'
 
 
 class YardstickRunner(DockerRunner):
 
     def __init__(self, testcase):
+        self.type = 'yardstick'
         super(YardstickRunner, self).__init__(testcase)
-        self.name = 'yardstick'
 
 
 class ShellRunner(object):
@@ -72,16 +89,55 @@ class ShellRunner(object):
 
     @classmethod
     def create_log(cls):
-        cls.logger = dt_logger.Logger(__file__).getLogger()
+        cls.logger = dt_logger.Logger(__name__ + '.ShellRunner').getLogger()
 
     def __init__(self, testcase):
         super(ShellRunner, self).__init__()
         self.testcase = testcase
-        self.name = 'shell'
+        self.type = 'shell'
+        self.logger.debug('create runner:%s', self.type)
 
     def run(self):
-        for cmd in self.testcase.cmds:
-            dt_utils.exec_cmd(cmd, self.logger)
+        passed = True
+        failed = False
+        result = {'pass': True, 'results': []}
+        if not self.testcase.prepared():
+            cmds = self.testcase.pre_condition()
+            for cmd in cmds:
+                ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+                result['results'].append((cmd, ret, msg))
+                if ret != 0:
+                    failed = True
+                    break
+            if not failed:
+                self.testcase.prepared(True)
+
+        if not self.testcase.prepare_cmd():
+            self.logger.error('failed to prepare cmd:%s',
+                              self.testcase.name())
+        else:
+            for cmd in self.testcase.cmds:
+                ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+                result['results'].append((cmd, ret, msg))
+                if ret != 0:
+                    passed = False
+
+        result['pass'] = passed
+
+        cmds = self.testcase.post_condition()
+        for cmd in cmds:
+            ret, msg = dt_utils.exec_cmd(cmd, self.logger)
+            result['results'].append((cmd, ret, msg))
+
+        result_filename = os.path.join(dt_cfg.dovetail_config['result_dir'],
+                                       self.testcase.name()) + '.out'
+        self.logger.debug('save result:%s', result_filename)
+        try:
+            with open(result_filename, 'w') as f:
+                f.write(json.dumps(result))
+        except Exception as e:
+            self.logger.exception('Failed to write result into file:%s, \
+                                   except:%s', result_filename, e)
 
 
 class TestRunnerFactory(object):
index dd0fd2b..040c6f9 100644 (file)
@@ -93,17 +93,33 @@ class Testcase(object):
         return self._result_acquired(self.validate_testcase(), acquired)
 
     def pre_condition(self):
-        return self.pre_condition_cls(self.validate_type())
+        try:
+            pre_condition = self.testcase['validate']['pre_condition']
+            if pre_condition == '':
+                pre_condition = self.pre_condition_cls(self.validate_type())
+            return pre_condition
+        except:
+            self.logger.debug('testcase:%s pre_condition is empty',
+                              self.name())
+            return ''
 
     def post_condition(self):
-        return self.post_condition_cls(self.validate_type())
+        try:
+            post_condition = self.testcase['validate']['post_condition']
+            if post_condition == '':
+                post_condition = self.post_condition_cls(self.validate_type())
+            return post_condition
+        except:
+            self.logger.debug('testcae:%s post_condition is empty',
+                              self.name())
+            return ''
 
     def run(self):
         runner = TestRunnerFactory.create(self)
         try:
             runner.run()
-        except AttributeError:
-            pass
+        except AttributeError as e:
+            self.logger.exception('testcase:%s except:%s', self.name, e)
 
     # testcase in upstream testing project
     # validate_testcase_list = {'functest': {}, 'yardstick': {}, 'shell': {}}
@@ -187,7 +203,7 @@ class FunctestTestcase(Testcase):
 
     def __init__(self, testcase_yaml):
         super(FunctestTestcase, self).__init__(testcase_yaml)
-        self.name = 'functest'
+        self.type = 'functest'
 
     def prepare_cmd(self):
         ret = super(FunctestTestcase, self).prepare_cmd()
@@ -208,7 +224,7 @@ class YardstickTestcase(Testcase):
 
     def __init__(self, testcase_yaml):
         super(YardstickTestcase, self).__init__(testcase_yaml)
-        self.name = 'yardstick'
+        self.type = 'yardstick'
 
 
 class ShellTestcase(Testcase):
@@ -217,7 +233,7 @@ class ShellTestcase(Testcase):
 
     def __init__(self, testcase_yaml):
         super(ShellTestcase, self).__init__(testcase_yaml)
-        self.name = 'shell'
+        self.type = 'shell'
 
 
 class TestcaseFactory(object):
index 89d000c..b929607 100644 (file)
@@ -8,6 +8,7 @@ dovetail.example.tc002:
       - "echo pre_condition"
     cmds:
       - "echo test2"
+      - "mkdir xxx"
     post_condition:
       - "echo post_condition"
   report:
diff --git a/dovetail/testcase/example.tc003.yml b/dovetail/testcase/example.tc003.yml
new file mode 100644 (file)
index 0000000..7916e01
--- /dev/null
@@ -0,0 +1,14 @@
+dovetail.example.tc003:
+  name: dovetail.example.tc003
+  objective: doing something useful
+  validate:
+    type: shell
+    testcase: "run shell22"
+    pre_condition:
+    cmds:
+      - "echo test2"
+    post_condition:
+      - ""
+  report:
+    sub_testcase_list:
+
index 0bc0baa..916d091 100644 (file)
@@ -5,12 +5,12 @@ dovetail.ipv6.tc001:
     type: functest
     testcase: tempest_smoke_serial
     pre_condition:
-      - 'echo test for precondition'
+      - 'echo test for precondition in testcase'
     cmds:
       - 'functest env prepare'
       - 'functest testcase run {{validate_testcase}}'
     post_condition:
-      - 'echo test for precondition'
+      - 'echo test for precondition in testcase'
   report:
     sub_testcase_list:
       - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network
index 1c68b7f..8c4865d 100644 (file)
@@ -34,7 +34,7 @@ def exec_log(verbose, logger, msg, level, flush=False):
             sys.stdout.flush()
 
 
-def exec_cmd(cmd, logger=None, exit_on_error=True, info=False,
+def exec_cmd(cmd, logger=None, exit_on_error=False, info=False,
              err_msg="", verbose=True):
     msg_err = ("The command '%s' failed." % cmd) if not err_msg else err_msg
     msg_exec = ("Executing command: '%s'" % cmd)