Correct documented return values
diff --git a/xtesting/core/behaveframework.py b/xtesting/core/behaveframework.py
index d8a61ef..3dc6038 100644
--- a/xtesting/core/behaveframework.py
+++ b/xtesting/core/behaveframework.py
@@ -14,6 +14,7 @@ from __future__ import division
 import logging
 import os
 import time
+
 import json
 
 from behave.__main__ import main as behave_main
@@ -28,11 +29,9 @@ class BehaveFramework(testcase.TestCase):
     # pylint: disable=too-many-instance-attributes
 
     __logger = logging.getLogger(__name__)
-    dir_results = "/var/lib/xtesting/results"
 
     def __init__(self, **kwargs):
-        super(BehaveFramework, self).__init__(**kwargs)
-        self.res_dir = os.path.join(self.dir_results, self.case_name)
+        super().__init__(**kwargs)
         self.json_file = os.path.join(self.res_dir, 'output.json')
         self.total_tests = 0
         self.pass_tests = 0
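
The hard-coded dir_results constant and the res_dir assignment are dropped on the assumption that the parent testcase.TestCase now provides res_dir itself. A minimal stand-in for that assumed base-class behaviour (attribute names taken from the removed lines, not verified against the actual base class):

import os


class TestCase:
    """Simplified, hypothetical stand-in for xtesting.core.testcase.TestCase."""
    # Assumed: the results root now lives in the base class.
    dir_results = "/var/lib/xtesting/results"

    def __init__(self, **kwargs):
        self.case_name = kwargs.get("case_name", "")
        # Assumed: res_dir is built once here and inherited by BehaveFramework.
        self.res_dir = os.path.join(self.dir_results, self.case_name)
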
@@ -42,14 +41,8 @@ class BehaveFramework(testcase.TestCase):
 
     def parse_results(self):
         """Parse output.json and get the details in it."""
-
-        try:
-            with open(self.json_file) as stream_:
-                self.response = json.load(stream_)
-        except IOError:
-            self.__logger.error("Error reading the file %s", self.json_file)
-
-        try:
+        with open(self.json_file) as stream_:
+            self.response = json.load(stream_)
             if self.response:
                 self.total_tests = len(self.response)
             for item in self.response:
@@ -59,21 +52,14 @@ class BehaveFramework(testcase.TestCase):
                     self.fail_tests += 1
                 elif item['status'] == 'skipped':
                     self.skip_tests += 1
-        except KeyError:
-            self.__logger.error("Error in json - %s", self.response)
-
-        try:
             self.result = 100 * (
                 self.pass_tests / self.total_tests)
-        except ZeroDivisionError:
-            self.__logger.error("No test has been run")
-
-        self.details = {}
-        self.details['total_tests'] = self.total_tests
-        self.details['pass_tests'] = self.pass_tests
-        self.details['fail_tests'] = self.fail_tests
-        self.details['skip_tests'] = self.skip_tests
-        self.details['tests'] = self.response
+            self.details = {}
+            self.details['total_tests'] = self.total_tests
+            self.details['pass_tests'] = self.pass_tests
+            self.details['fail_tests'] = self.fail_tests
+            self.details['skip_tests'] = self.skip_tests
+            self.details['tests'] = self.response
 
     def run(self, **kwargs):
         """Run the BehaveFramework feature files
@@ -92,7 +78,6 @@ class BehaveFramework(testcase.TestCase):
         """
         try:
             suites = kwargs["suites"]
-            tags = kwargs.get("tags", [])
         except KeyError:
             self.__logger.exception("Mandatory args were not passed")
             return self.EX_RUN_ERROR
@@ -102,9 +87,15 @@ class BehaveFramework(testcase.TestCase):
             except Exception:  # pylint: disable=broad-except
                 self.__logger.exception("Cannot create %s", self.res_dir)
                 return self.EX_RUN_ERROR
-        config = ['--tags='+','.join(tags),
-                  '--format=json',
-                  '--outfile='+self.json_file]
+        config = ['--junit', '--junit-directory={}'.format(self.res_dir),
+                  '--format=json', '--outfile={}'.format(self.json_file)]
+        html_file = os.path.join(self.res_dir, 'output.html')
+        config += ['--format=behave_html_formatter:HTMLFormatter',
+                   '--outfile={}'.format(html_file)]
+        if kwargs.get("tags", False):
+            config += ['--tags='+','.join(kwargs.get("tags", []))]
+        if kwargs.get("console", False):
+            config += ['--format=pretty', '--outfile=-']
         for feature in suites:
             config.append(feature)
         self.start_time = time.time()
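
For context, a hypothetical caller of the reworked run(); the case name, project name and feature path below are illustrative only, and the trailing comment shows roughly which arguments behave_main() would receive given the new config handling:

from xtesting.core.behaveframework import BehaveFramework

suite = BehaveFramework(case_name='behave_example', project_name='xtesting')
suite.run(
    suites=['/src/tests/features/'],  # mandatory, else EX_RUN_ERROR
    tags=['smoke'],                   # optional: adds --tags=smoke
    console=True)                     # optional: adds --format=pretty --outfile=-

# Resulting behave arguments (roughly):
#   --junit --junit-directory=/var/lib/xtesting/results/behave_example
#   --format=json --outfile=.../output.json
#   --format=behave_html_formatter:HTMLFormatter --outfile=.../output.html
#   --tags=smoke --format=pretty --outfile=- /src/tests/features/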