Switch from generate_report to PrettyTable 21/34621/8
authorCédric Ollivier <cedric.ollivier@orange.com>
Thu, 11 May 2017 11:01:16 +0000 (13:01 +0200)
committerCédric Ollivier <cedric.ollivier@orange.com>
Tue, 16 May 2017 13:01:57 +0000 (15:01 +0200)
run_tests.py now relies on PrettyTable, as most of the OpenStack clients do.
generate_report.py and its related unit tests are simply removed.

It also sets padding_width=5 in testcase.py to conform with
run_tests.py.

Now the report is printed in every case.

Change-Id: Id9ce93f984503f25d6a2150482f397853fa3dd64
Signed-off-by: Cédric Ollivier <cedric.ollivier@orange.com>
docs/com/pres/framework/framework.md
functest/ci/generate_report.py [deleted file]
functest/ci/run_tests.py
functest/core/feature.py
functest/core/testcase.py
functest/tests/unit/ci/test_generate_report.py [deleted file]
functest/tests/unit/ci/test_run_tests.py
functest/tests/unit/core/test_testcase.py

index 61249e0..3c1aae1 100644 (file)
@@ -102,7 +102,6 @@ if result == testcase.TestCase.EX_OK:
     if GlobalVariables.REPORT_FLAG:
         test_case.push_to_db()
     result = test_case.is_successful()
-duration = test_case.get_duration()
 ```
 
 
diff --git a/functest/ci/generate_report.py b/functest/ci/generate_report.py
deleted file mode 100644 (file)
index e400b1b..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import json
-import logging
-import re
-import urllib2
-
-import functest.utils.functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-COL_1_LEN = 25
-COL_2_LEN = 15
-COL_3_LEN = 12
-COL_4_LEN = 15
-COL_5_LEN = 75
-
-# If we run from CI (Jenkins) we will push the results to the DB
-# and then we can print the url to the specific test result
-
-
-logger = logging.getLogger(__name__)
-
-
-def init(tiers_to_run=[]):
-    test_cases_arr = []
-    for tier in tiers_to_run:
-        for test in tier.get_tests():
-            test_cases_arr.append({'test_name': test.get_name(),
-                                   'tier_name': tier.get_name(),
-                                   'result': 'Not executed',
-                                   'duration': '0',
-                                   'url': ''})
-    return test_cases_arr
-
-
-def get_results_from_db():
-    url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                               CONST.BUILD_TAG)
-    logger.debug("Query to rest api: %s" % url)
-    try:
-        data = json.load(urllib2.urlopen(url))
-        return data['results']
-    except:
-        logger.error("Cannot read content from the url: %s" % url)
-        return None
-
-
-def get_data(test, results):
-    test_result = test['result']
-    url = ''
-    for test_db in results:
-        if test['test_name'] in test_db['case_name']:
-            id = test_db['_id']
-            url = ft_utils.get_db_url() + '/' + id
-            test_result = test_db['criteria']
-
-    return {"url": url, "result": test_result}
-
-
-def print_line(w1, w2='', w3='', w4='', w5=''):
-    str = ('| ' + w1.ljust(COL_1_LEN - 1) +
-           '| ' + w2.ljust(COL_2_LEN - 1) +
-           '| ' + w3.ljust(COL_3_LEN - 1) +
-           '| ' + w4.ljust(COL_4_LEN - 1))
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += ('| ' + w5.ljust(COL_5_LEN - 1))
-    str += '|\n'
-    return str
-
-
-def print_line_no_columns(str):
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN + 2
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN + 1
-    return ('| ' + str.ljust(TOTAL_LEN) + "|\n")
-
-
-def print_separator(char="=", delimiter="+"):
-    str = ("+" + char * COL_1_LEN +
-           delimiter + char * COL_2_LEN +
-           delimiter + char * COL_3_LEN +
-           delimiter + char * COL_4_LEN)
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += (delimiter + char * COL_5_LEN)
-    str += '+\n'
-    return str
-
-
-def main(args=[]):
-    executed_test_cases = args
-
-    if CONST.__getattribute__('IS_CI_RUN'):
-        results = get_results_from_db()
-        if results is not None:
-            for test in executed_test_cases:
-                data = get_data(test, results)
-                test.update({"url": data['url'],
-                             "result": data['result']})
-
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN
-    MID = TOTAL_LEN / 2
-
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        if re.search("daily", CONST.__getattribute__('BUILD_TAG')) is not None:
-            CONST.__setattr__('CI_LOOP', 'daily')
-        else:
-            CONST.__setattr__('CI_LOOP', 'weekly')
-
-    str = ''
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ' * (MID - 8) + 'FUNCTEST REPORT')
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ')
-    str += print_line_no_columns(" Deployment description:")
-    str += print_line_no_columns("   INSTALLER: %s"
-                                 % CONST.__getattribute__('INSTALLER_TYPE'))
-    if CONST.__getattribute__('DEPLOY_SCENARIO') is not None:
-        str += print_line_no_columns("   SCENARIO:  %s"
-                                     % CONST.__getattribute__(
-                                         'DEPLOY_SCENARIO'))
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        str += print_line_no_columns("   BUILD TAG: %s"
-                                     % CONST.__getattribute__('BUILD_TAG'))
-    if CONST.__getattribute__('CI_LOOP') is not None:
-        str += print_line_no_columns("   CI LOOP:   %s"
-                                     % CONST.__getattribute__('CI_LOOP'))
-    str += print_line_no_columns(' ')
-    str += print_separator('=')
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT', 'URL')
-    else:
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT')
-    str += print_separator('=')
-    for test in executed_test_cases:
-        str += print_line(test['test_name'],
-                          test['tier_name'],
-                          test['duration'],
-                          test['result'],
-                          test['url'])
-        str += print_separator('-')
-
-    logger.info("\n\n\n%s" % str)
index 7035992..493d5f9 100755 (executable)
@@ -17,7 +17,8 @@ import os
 import re
 import sys
 
-import functest.ci.generate_report as generate_report
+import prettytable
+
 import functest.ci.tier_builder as tb
 import functest.core.testcase as testcase
 import functest.utils.functest_utils as ft_utils
@@ -99,13 +100,6 @@ def cleanup():
     os_clean.main()
 
 
-def update_test_info(test_name, result, duration):
-    for test in GlobalVariables.EXECUTED_TEST_CASES:
-        if test['test_name'] == test_name:
-            test.update({"result": result,
-                         "duration": duration})
-
-
 def get_run_dict(testname):
     try:
         dict = ft_utils.get_dict_by_test(testname)
@@ -120,8 +114,6 @@ def get_run_dict(testname):
 
 
 def run_test(test, tier_name, testcases=None):
-    duration = "XX:XX"
-    result_str = "PASS"
     test_name = test.get_name()
     logger.info("\n")  # blank line
     print_separator("=")
@@ -145,6 +137,7 @@ def run_test(test, tier_name, testcases=None):
             cls = getattr(module, run_dict['class'])
             test_dict = ft_utils.get_dict_by_test(test_name)
             test_case = cls(**test_dict)
+            GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
             try:
                 kwargs = run_dict['args']
                 result = test_case.run(**kwargs)
@@ -154,8 +147,7 @@ def run_test(test, tier_name, testcases=None):
                 if GlobalVariables.REPORT_FLAG:
                     test_case.push_to_db()
                 result = test_case.is_successful()
-            duration = test_case.get_duration()
-            logger.info("\n%s\n", test_case)
+            logger.info("Test result:\n\n%s\n", test_case)
         except ImportError:
             logger.exception("Cannot import module {}".format(
                 run_dict['module']))
@@ -167,22 +159,13 @@ def run_test(test, tier_name, testcases=None):
 
     if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
         cleanup()
-
     if result != testcase.TestCase.EX_OK:
         logger.error("The test case '%s' failed. " % test_name)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-        result_str = "FAIL"
-
         if test.is_blocking():
-            if not testcases or testcases == "all":
-                # if it is a single test we don't print the whole results table
-                update_test_info(test_name, result_str, duration)
-                generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
             raise BlockingTestFailed("The test case {} failed and is blocking"
                                      .format(test.get_name()))
 
-    update_test_info(test_name, result_str, duration)
-
 
 def run_tier(tier):
     tier_name = tier.get_name()
@@ -214,12 +197,9 @@ def run_all(tiers):
                            tier.get_test_names()))
 
     logger.info("Tests to be executed:%s" % summary)
-    GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
     for tier in tiers_to_run:
         run_tier(tier)
 
-    generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-
 
 def main(**kwargs):
 
@@ -238,8 +218,6 @@ def main(**kwargs):
         if kwargs['test']:
             source_rc_file()
             if _tiers.get_tier(kwargs['test']):
-                GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
-                    [_tiers.get_tier(kwargs['test'])])
                 run_tier(_tiers.get_tier(kwargs['test']))
             elif _tiers.get_test(kwargs['test']):
                 run_test(_tiers.get_test(kwargs['test']),
@@ -261,6 +239,26 @@ def main(**kwargs):
     except Exception as e:
         logger.error(e)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['env var', 'value'])
+    for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+                    'CI_LOOP']:
+        msg.add_row([env_var, CONST.__getattribute__(env_var)])
+    logger.info("Deployment description: \n\n%s\n", msg)
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['test case', 'project', 'tier', 'duration', 'result'])
+    for test_case in GlobalVariables.EXECUTED_TEST_CASES:
+        result = 'PASS' if(test_case.is_successful(
+                ) == test_case.EX_OK) else 'FAIL'
+        msg.add_row([test_case.case_name, test_case.project_name,
+                     _tiers.get_tier_name(test_case.case_name),
+                     test_case.get_duration(), result])
+    logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
     logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
     return GlobalVariables.OVERALL_RESULT
 
index 8563c92..140c9bb 100644 (file)
@@ -83,7 +83,6 @@ class Feature(base.TestCase):
             ft_utils.logger_test_results(
                 self.project_name, self.case_name,
                 self.result, self.details)
-            self.__logger.info("%s %s", self.project_name, self.result)
         except Exception:  # pylint: disable=broad-except
             self.__logger.exception("%s FAILED", self.project_name)
         self.__logger.info("Test result is stored in '%s'", self.result_file)
index 317c4f5..d8b63ef 100644 (file)
@@ -52,7 +52,7 @@ class TestCase(object):
             result = 'PASS' if(self.is_successful(
                 ) == TestCase.EX_OK) else 'FAIL'
             msg = prettytable.PrettyTable(
-                header_style='upper',
+                header_style='upper', padding_width=5,
                 field_names=['test case', 'project', 'duration',
                              'result'])
             msg.add_row([self.case_name, self.project_name,
diff --git a/functest/tests/unit/ci/test_generate_report.py b/functest/tests/unit/ci/test_generate_report.py
deleted file mode 100644 (file)
index 2c5ce2e..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-import unittest
-import urllib2
-
-import mock
-
-from functest.ci import generate_report as gen_report
-from functest.tests.unit import test_utils
-from functest.utils import functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-
-class GenerateReportTesting(unittest.TestCase):
-
-    logging.disable(logging.CRITICAL)
-
-    def test_init(self):
-        test_array = gen_report.init()
-        self.assertEqual(test_array, [])
-
-    @mock.patch('functest.ci.generate_report.urllib2.urlopen',
-                side_effect=urllib2.URLError('no host given'))
-    def test_get_results_from_db_fail(self, mock_method):
-        url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                                   CONST.__getattribute__('BUILD_TAG'))
-        self.assertIsNone(gen_report.get_results_from_db())
-        mock_method.assert_called_once_with(url)
-
-    @mock.patch('functest.ci.generate_report.urllib2.urlopen',
-                return_value={'results': []})
-    def test_get_results_from_db_success(self, mock_method):
-        url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                                   CONST.__getattribute__('BUILD_TAG'))
-        self.assertEqual(gen_report.get_results_from_db(), None)
-        mock_method.assert_called_once_with(url)
-
-    def test_get_data(self):
-        self.assertIsInstance(gen_report.get_data({'result': ''}, ''), dict)
-
-    def test_print_line_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        w1 = 'test_print_line'
-        test_str = ("| %s| %s| %s| %s| %s|\n"
-                    % (w1.ljust(gen_report.COL_1_LEN - 1),
-                       ''.ljust(gen_report.COL_2_LEN - 1),
-                       ''.ljust(gen_report.COL_3_LEN - 1),
-                       ''.ljust(gen_report.COL_4_LEN - 1),
-                       ''.ljust(gen_report.COL_5_LEN - 1)))
-        self.assertEqual(gen_report.print_line(w1), test_str)
-
-    def test_print_line_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        w1 = 'test_print_line'
-        test_str = ("| %s| %s| %s| %s|\n"
-                    % (w1.ljust(gen_report.COL_1_LEN - 1),
-                       ''.ljust(gen_report.COL_2_LEN - 1),
-                       ''.ljust(gen_report.COL_3_LEN - 1),
-                       ''.ljust(gen_report.COL_4_LEN - 1)))
-        self.assertEqual(gen_report.print_line(w1), test_str)
-
-    def test_print_line_no_column_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
-        TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
-        TOTAL_LEN += gen_report.COL_5_LEN + 1
-        test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
-        self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
-    def test_print_line_no_column_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
-        TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
-        test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
-        self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
-    def test_print_separator_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        test_str = ("+" + "=" * gen_report.COL_1_LEN +
-                    "+" + "=" * gen_report.COL_2_LEN +
-                    "+" + "=" * gen_report.COL_3_LEN +
-                    "+" + "=" * gen_report.COL_4_LEN +
-                    "+" + "=" * gen_report.COL_5_LEN)
-        test_str += '+\n'
-        self.assertEqual(gen_report.print_separator(), test_str)
-
-    def test_print_separator_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        test_str = ("+" + "=" * gen_report.COL_1_LEN +
-                    "+" + "=" * gen_report.COL_2_LEN +
-                    "+" + "=" * gen_report.COL_3_LEN +
-                    "+" + "=" * gen_report.COL_4_LEN)
-        test_str += "+\n"
-        self.assertEqual(gen_report.print_separator(), test_str)
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_ci_run(self, mock_method):
-        CONST.__setattr__('IS_CI_RUN', True)
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('URL'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_ci_loop(self, mock_method):
-        CONST.__setattr__('CI_LOOP', 'daily')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('CI LOOP'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_scenario(self, mock_method):
-        CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('SCENARIO'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_build_tag(self, mock_method):
-        CONST.__setattr__('BUILD_TAG', 'test_build_tag')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.
-                                            SubstrMatch('BUILD TAG'))
-
-
-if __name__ == "__main__":
-    unittest.main(verbosity=2)
index ef08282..d005239 100644 (file)
@@ -70,17 +70,6 @@ class RunTestsTesting(unittest.TestCase):
         run_tests.cleanup()
         self.assertTrue(mock_os_clean.called)
 
-    def test_update_test_info(self):
-        run_tests.GlobalVariables.EXECUTED_TEST_CASES = [self.test]
-        run_tests.update_test_info('test_name',
-                                   'test_result',
-                                   'test_duration')
-        exp = self.test
-        exp.update({"result": 'test_result',
-                    "duration": 'test_duration'})
-        self.assertEqual(run_tests.GlobalVariables.EXECUTED_TEST_CASES,
-                         [exp])
-
     def test_get_run_dict_if_defined_default(self):
         mock_obj = mock.Mock()
         with mock.patch('functest.ci.run_tests.'
@@ -148,10 +137,8 @@ class RunTestsTesting(unittest.TestCase):
             mock.patch('functest.ci.run_tests.source_rc_file'), \
             mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
             mock.patch('functest.ci.run_tests.cleanup'), \
-            mock.patch('functest.ci.run_tests.update_test_info'), \
             mock.patch('functest.ci.run_tests.get_run_dict',
                        return_value=test_run_dict), \
-            mock.patch('functest.ci.run_tests.generate_report.main'), \
                 self.assertRaises(run_tests.BlockingTestFailed) as context:
             run_tests.GlobalVariables.CLEAN_FLAG = True
             run_tests.run_test(mock_test, 'tier_name')
@@ -176,21 +163,17 @@ class RunTestsTesting(unittest.TestCase):
 
     @mock.patch('functest.ci.run_tests.logger.info')
     def test_run_all_default(self, mock_logger_info):
-        with mock.patch('functest.ci.run_tests.run_tier') as mock_method, \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
-                mock.patch('functest.ci.run_tests.generate_report.main'):
+        with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
             CONST.__setattr__('CI_LOOP', 'test_ci_loop')
             run_tests.run_all(self.tiers)
             mock_method.assert_any_call(self.tier)
             self.assertTrue(mock_logger_info.called)
 
     @mock.patch('functest.ci.run_tests.logger.info')
-    def test_run_all__missing_tier(self, mock_logger_info):
-        with mock.patch('functest.ci.run_tests.generate_report.init'), \
-                mock.patch('functest.ci.run_tests.generate_report.main'):
-            CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
-            run_tests.run_all(self.tiers)
-            self.assertTrue(mock_logger_info.called)
+    def test_run_all_missing_tier(self, mock_logger_info):
+        CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
+        run_tests.run_all(self.tiers)
+        self.assertTrue(mock_logger_info.called)
 
     def test_main_failed(self):
         kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
@@ -221,7 +204,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_tier') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -234,7 +216,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_test') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -248,7 +229,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_all') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -262,7 +242,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.logger.debug') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_ERROR)
index 08a717a..7222967 100644 (file)
@@ -20,7 +20,6 @@ __author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
 
 
 class TestCaseTesting(unittest.TestCase):
-
     """The class testing TestCase."""
     # pylint: disable=missing-docstring,too-many-public-methods