Merge "Additional Unit Tests for core modules"
author    Jose Lausuch <jose.lausuch@ericsson.com>
          Tue, 16 May 2017 15:23:41 +0000 (15:23 +0000)
committer Gerrit Code Review <gerrit@opnfv.org>
          Tue, 16 May 2017 15:23:41 +0000 (15:23 +0000)
15 files changed:
docs/com/pres/framework/framework.md
functest/ci/generate_report.py [deleted file]
functest/ci/run_tests.py
functest/ci/testcases.yaml
functest/ci/tier_builder.py
functest/ci/tier_handler.py
functest/core/feature.py
functest/core/testcase.py
functest/tests/unit/ci/test_generate_report.py [deleted file]
functest/tests/unit/ci/test_run_tests.py
functest/tests/unit/ci/test_tier_builder.py
functest/tests/unit/ci/test_tier_handler.py
functest/tests/unit/core/test_testcase.py
requirements.txt
test-requirements.txt

index b80ad3d..3c1aae1 100644 (file)
@@ -2,11 +2,11 @@
 
 created by [Cédric Ollivier](mailto:cedric.ollivier@orange.com)
 
-2017/04/24
+2017/05/06
 
 Note:
 
-- Functest integrates lots of heteregeounous testcases:
+- Functest integrates lots of heterogeneous testcases:
     - python vs bash
     - internal vs external
 - it aims to benefit from object programming
@@ -33,11 +33,11 @@ Note:
 ### Our target
 
 - limit run_tests.py instructions by defining:
-    - the basic testcase attritutes
+    - the basic testcase attributes
     - all common operations
     - the status codes expected
 - avoid duplicating codes between testcases
-- ease the developpement of third-party testcases (aka features)
+- ease the development of third-party testcases (aka features)
 
 
 
@@ -51,6 +51,7 @@ base model for single test case
 - project_name (default: 'functest')
 - case_name
 - criteria
+- result
 - start_time
 - stop_time
 - details
@@ -61,7 +62,8 @@ base model for single test case
 | Method            | Purpose                                    |
 |-------------------|--------------------------------------------|
 | run(**kwargs)     | run the test case                          |
-| check_criteria()  | interpret the results of the test case     |
+| is_successful()   | interpret the results of the test case     |
+| get_duration()    | return the duration of the test case       |
 | push_to_db()      | push the results of the test case to the DB|
 
 
@@ -69,7 +71,7 @@ base model for single test case
 
 - the subclasses must override the default implementation which is false on purpose
 - the new implementation must set the following attributes to push the results to DB:
-    - criteria
+    - result
     - start_time
     - stop_time
 
@@ -99,7 +101,7 @@ except KeyError:
 if result == testcase.TestCase.EX_OK:
     if GlobalVariables.REPORT_FLAG:
         test_case.push_to_db()
-    result = test_case.check_criteria()
+    result = test_case.is_successful()
 ```
 
 
@@ -121,7 +123,7 @@ class Test(testcase.TestCase):
     def run(self, **kwargs):
         self.start_time = time.time()
         print "Hello World"
-        self.criteria = 'PASS'
+        self.result = 100
         self.stop_time = time.time()
         return testcase.TestCase.EX_OK
 ```
@@ -132,7 +134,7 @@ class Test(testcase.TestCase):
 ```yaml
 case_name: first
 project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
 blocking: true
 clean_flag: false
 description: ''
@@ -164,7 +166,7 @@ base model for single feature
 
 - allows executing any Python method by calling execute()
 - sets the following attributes required to push the results to DB:
-    - criteria
+    - result
     - start_time
     - stop_time
 - doesn't fulfill details when pushing the results to the DB.
@@ -200,7 +202,7 @@ class Test(feature.Feature):
 ```yaml
 case_name: second
 project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
 blocking: true
 clean_flag: false
 description: ''
@@ -234,7 +236,7 @@ execute the cmd passed as arg.
 ```
 case_name: third
 project_name: functest
-criteria: 'status == "PASS"'
+criteria: 100
 blocking: true
 clean_flag: false
 description: ''
diff --git a/functest/ci/generate_report.py b/functest/ci/generate_report.py
deleted file mode 100644 (file)
index e400b1b..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/env python
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-import json
-import logging
-import re
-import urllib2
-
-import functest.utils.functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-COL_1_LEN = 25
-COL_2_LEN = 15
-COL_3_LEN = 12
-COL_4_LEN = 15
-COL_5_LEN = 75
-
-# If we run from CI (Jenkins) we will push the results to the DB
-# and then we can print the url to the specific test result
-
-
-logger = logging.getLogger(__name__)
-
-
-def init(tiers_to_run=[]):
-    test_cases_arr = []
-    for tier in tiers_to_run:
-        for test in tier.get_tests():
-            test_cases_arr.append({'test_name': test.get_name(),
-                                   'tier_name': tier.get_name(),
-                                   'result': 'Not executed',
-                                   'duration': '0',
-                                   'url': ''})
-    return test_cases_arr
-
-
-def get_results_from_db():
-    url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                               CONST.BUILD_TAG)
-    logger.debug("Query to rest api: %s" % url)
-    try:
-        data = json.load(urllib2.urlopen(url))
-        return data['results']
-    except:
-        logger.error("Cannot read content from the url: %s" % url)
-        return None
-
-
-def get_data(test, results):
-    test_result = test['result']
-    url = ''
-    for test_db in results:
-        if test['test_name'] in test_db['case_name']:
-            id = test_db['_id']
-            url = ft_utils.get_db_url() + '/' + id
-            test_result = test_db['criteria']
-
-    return {"url": url, "result": test_result}
-
-
-def print_line(w1, w2='', w3='', w4='', w5=''):
-    str = ('| ' + w1.ljust(COL_1_LEN - 1) +
-           '| ' + w2.ljust(COL_2_LEN - 1) +
-           '| ' + w3.ljust(COL_3_LEN - 1) +
-           '| ' + w4.ljust(COL_4_LEN - 1))
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += ('| ' + w5.ljust(COL_5_LEN - 1))
-    str += '|\n'
-    return str
-
-
-def print_line_no_columns(str):
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN + 2
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN + 1
-    return ('| ' + str.ljust(TOTAL_LEN) + "|\n")
-
-
-def print_separator(char="=", delimiter="+"):
-    str = ("+" + char * COL_1_LEN +
-           delimiter + char * COL_2_LEN +
-           delimiter + char * COL_3_LEN +
-           delimiter + char * COL_4_LEN)
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += (delimiter + char * COL_5_LEN)
-    str += '+\n'
-    return str
-
-
-def main(args=[]):
-    executed_test_cases = args
-
-    if CONST.__getattribute__('IS_CI_RUN'):
-        results = get_results_from_db()
-        if results is not None:
-            for test in executed_test_cases:
-                data = get_data(test, results)
-                test.update({"url": data['url'],
-                             "result": data['result']})
-
-    TOTAL_LEN = COL_1_LEN + COL_2_LEN + COL_3_LEN + COL_4_LEN
-    if CONST.__getattribute__('IS_CI_RUN'):
-        TOTAL_LEN += COL_5_LEN
-    MID = TOTAL_LEN / 2
-
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        if re.search("daily", CONST.__getattribute__('BUILD_TAG')) is not None:
-            CONST.__setattr__('CI_LOOP', 'daily')
-        else:
-            CONST.__setattr__('CI_LOOP', 'weekly')
-
-    str = ''
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ' * (MID - 8) + 'FUNCTEST REPORT')
-    str += print_separator('=', delimiter="=")
-    str += print_line_no_columns(' ')
-    str += print_line_no_columns(" Deployment description:")
-    str += print_line_no_columns("   INSTALLER: %s"
-                                 % CONST.__getattribute__('INSTALLER_TYPE'))
-    if CONST.__getattribute__('DEPLOY_SCENARIO') is not None:
-        str += print_line_no_columns("   SCENARIO:  %s"
-                                     % CONST.__getattribute__(
-                                         'DEPLOY_SCENARIO'))
-    if CONST.__getattribute__('BUILD_TAG') is not None:
-        str += print_line_no_columns("   BUILD TAG: %s"
-                                     % CONST.__getattribute__('BUILD_TAG'))
-    if CONST.__getattribute__('CI_LOOP') is not None:
-        str += print_line_no_columns("   CI LOOP:   %s"
-                                     % CONST.__getattribute__('CI_LOOP'))
-    str += print_line_no_columns(' ')
-    str += print_separator('=')
-    if CONST.__getattribute__('IS_CI_RUN'):
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT', 'URL')
-    else:
-        str += print_line('TEST CASE', 'TIER', 'DURATION', 'RESULT')
-    str += print_separator('=')
-    for test in executed_test_cases:
-        str += print_line(test['test_name'],
-                          test['tier_name'],
-                          test['duration'],
-                          test['result'],
-                          test['url'])
-        str += print_separator('-')
-
-    logger.info("\n\n\n%s" % str)
index 1396644..7676009 100755 (executable)
@@ -17,7 +17,8 @@ import os
 import re
 import sys
 
-import functest.ci.generate_report as generate_report
+import prettytable
+
 import functest.ci.tier_builder as tb
 import functest.core.testcase as testcase
 import functest.utils.functest_utils as ft_utils
@@ -39,6 +40,10 @@ class BlockingTestFailed(Exception):
     pass
 
 
+class TestNotEnabled(Exception):
+    pass
+
+
 class RunTestsParser(object):
 
     def __init__(self):
@@ -99,13 +104,6 @@ def cleanup():
     os_clean.main()
 
 
-def update_test_info(test_name, result, duration):
-    for test in GlobalVariables.EXECUTED_TEST_CASES:
-        if test['test_name'] == test_name:
-            test.update({"result": result,
-                         "duration": duration})
-
-
 def get_run_dict(testname):
     try:
         dict = ft_utils.get_dict_by_test(testname)
@@ -120,8 +118,9 @@ def get_run_dict(testname):
 
 
 def run_test(test, tier_name, testcases=None):
-    duration = "XX:XX"
-    result_str = "PASS"
+    if not test.is_enabled():
+        raise TestNotEnabled("The test case {} is not enabled"
+                             .format(test.get_name()))
     test_name = test.get_name()
     logger.info("\n")  # blank line
     print_separator("=")
@@ -145,6 +144,7 @@ def run_test(test, tier_name, testcases=None):
             cls = getattr(module, run_dict['class'])
             test_dict = ft_utils.get_dict_by_test(test_name)
             test_case = cls(**test_dict)
+            GlobalVariables.EXECUTED_TEST_CASES.append(test_case)
             try:
                 kwargs = run_dict['args']
                 result = test_case.run(**kwargs)
@@ -154,8 +154,7 @@ def run_test(test, tier_name, testcases=None):
                 if GlobalVariables.REPORT_FLAG:
                     test_case.push_to_db()
                 result = test_case.is_successful()
-            duration = test_case.get_duration()
-            logger.info("\n%s\n", test_case)
+            logger.info("Test result:\n\n%s\n", test_case)
         except ImportError:
             logger.exception("Cannot import module {}".format(
                 run_dict['module']))
@@ -167,22 +166,13 @@ def run_test(test, tier_name, testcases=None):
 
     if test.needs_clean() and GlobalVariables.CLEAN_FLAG:
         cleanup()
-
     if result != testcase.TestCase.EX_OK:
         logger.error("The test case '%s' failed. " % test_name)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
-        result_str = "FAIL"
-
         if test.is_blocking():
-            if not testcases or testcases == "all":
-                # if it is a single test we don't print the whole results table
-                update_test_info(test_name, result_str, duration)
-                generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
             raise BlockingTestFailed("The test case {} failed and is blocking"
                                      .format(test.get_name()))
 
-    update_test_info(test_name, result_str, duration)
-
 
 def run_tier(tier):
     tier_name = tier.get_name()
@@ -214,12 +204,9 @@ def run_all(tiers):
                            tier.get_test_names()))
 
     logger.info("Tests to be executed:%s" % summary)
-    GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(tiers_to_run)
     for tier in tiers_to_run:
         run_tier(tier)
 
-    generate_report.main(GlobalVariables.EXECUTED_TEST_CASES)
-
 
 def main(**kwargs):
 
@@ -238,12 +225,10 @@ def main(**kwargs):
         if kwargs['test']:
             source_rc_file()
             if _tiers.get_tier(kwargs['test']):
-                GlobalVariables.EXECUTED_TEST_CASES = generate_report.init(
-                    [_tiers.get_tier(kwargs['test'])])
                 run_tier(_tiers.get_tier(kwargs['test']))
             elif _tiers.get_test(kwargs['test']):
                 run_test(_tiers.get_test(kwargs['test']),
-                         _tiers.get_tier(kwargs['test']),
+                         _tiers.get_tier_name(kwargs['test']),
                          kwargs['test'])
             elif kwargs['test'] == "all":
                 run_all(_tiers)
@@ -261,6 +246,26 @@ def main(**kwargs):
     except Exception as e:
         logger.error(e)
         GlobalVariables.OVERALL_RESULT = Result.EX_ERROR
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['env var', 'value'])
+    for env_var in ['INSTALLER_TYPE', 'DEPLOY_SCENARIO', 'BUILD_TAG',
+                    'CI_LOOP']:
+        msg.add_row([env_var, CONST.__getattribute__(env_var)])
+    logger.info("Deployment description: \n\n%s\n", msg)
+
+    msg = prettytable.PrettyTable(
+        header_style='upper', padding_width=5,
+        field_names=['test case', 'project', 'tier', 'duration', 'result'])
+    for test_case in GlobalVariables.EXECUTED_TEST_CASES:
+        result = 'PASS' if(test_case.is_successful(
+                ) == test_case.EX_OK) else 'FAIL'
+        msg.add_row([test_case.case_name, test_case.project_name,
+                     _tiers.get_tier_name(test_case.case_name),
+                     test_case.get_duration(), result])
+    logger.info("FUNCTEST REPORT: \n\n%s\n", msg)
+
     logger.info("Execution exit value: %s" % GlobalVariables.OVERALL_RESULT)
     return GlobalVariables.OVERALL_RESULT
 
index 7009e91..d98a2de 100644 (file)
@@ -19,7 +19,6 @@ tiers:
                     simple queries. When the config value of
                     snaps.use_keystone is True, functest must have access to
                     the cloud's private network.
-
                 dependencies:
                     installer: '^((?!netvirt).)*$'
                     scenario: ''
@@ -39,13 +38,13 @@ tiers:
                     simple queries. When the config value of
                     snaps.use_keystone is True, functest must have access to
                     the cloud's private network.
-
                 dependencies:
                     installer: '^((?!netvirt).)*$'
                     scenario: ''
                 run:
                     module: 'functest.opnfv_tests.openstack.snaps.api_check'
                     class: 'ApiCheck'
+
             -
                 case_name: snaps_health_check
                 project_name: functest
@@ -63,6 +62,7 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.openstack.snaps.health_check'
                     class: 'HealthCheck'
+
     -
         name: smoke
         order: 1
@@ -266,6 +266,7 @@ tiers:
         testcases:
             -
                 case_name: promise
+                enabled: false
                 project_name: promise
                 criteria: 100
                 blocking: false
@@ -283,6 +284,7 @@ tiers:
 
             -
                 case_name: doctor-notification
+                enabled: false
                 project_name: doctor
                 criteria: 100
                 blocking: false
@@ -300,6 +302,7 @@ tiers:
 
             -
                 case_name: bgpvpn
+                enabled: false
                 project_name: sdnvpn
                 criteria: 100
                 blocking: false
@@ -317,6 +320,7 @@ tiers:
 
             -
                 case_name: security_scan
+                enabled: false
                 project_name: securityscanning
                 criteria: 100
                 blocking: false
@@ -334,6 +338,7 @@ tiers:
 
             -
                 case_name: copper
+                enabled: false
                 project_name: copper
                 criteria: 100
                 blocking: false
@@ -351,6 +356,7 @@ tiers:
 
             -
                 case_name: multisite
+                enabled: false
                 project_name: multisite
                 criteria: 100
                 blocking: false
@@ -363,8 +369,10 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.openstack.tempest.tempest'
                     class: 'TempestMultisite'
+
             -
                 case_name: functest-odl-sfc
+                enabled: false
                 project_name: sfc
                 criteria: 100
                 blocking: false
@@ -379,8 +387,10 @@ tiers:
                     class: 'BashFeature'
                     args:
                         cmd: 'cd /home/opnfv/repos/sfc/sfc/tests/functest && python ./run_tests.py'
+
             -
                 case_name: onos_sfc
+                enabled: false
                 project_name: functest
                 criteria: 100
                 blocking: true
@@ -393,8 +403,10 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.sdn.onos.onos'
                     class: 'OnosSfc'
+
             -
                 case_name: parser-basics
+                enabled: false
                 project_name: parser
                 criteria: 100
                 blocking: false
@@ -409,8 +421,10 @@ tiers:
                     class: 'BashFeature'
                     args:
                         cmd: 'cd /home/opnfv/repos/parser/tests && ./functest_run.sh'
+
             -
                 case_name: domino-multinode
+                enabled: false
                 project_name: domino
                 criteria: 100
                 blocking: false
@@ -425,8 +439,10 @@ tiers:
                     class: 'BashFeature'
                     args:
                         cmd: 'cd /home/opnfv/repos/domino && ./tests/run_multinode.sh'
+
             -
                 case_name: gluon_vping
+                enabled: false
                 project_name: netready
                 criteria: 100
                 blocking: false
@@ -441,8 +457,10 @@ tiers:
                     class: 'BashFeature'
                     args:
                         cmd: 'cd /home/opnfv/repos/netready/test/functest && python ./gluon-test-suite.py'
+
             -
                 case_name: barometercollectd
+                enabled: false
                 project_name: barometer
                 criteria: 100
                 blocking: false
@@ -458,6 +476,7 @@ tiers:
                 run:
                      module: 'functest.opnfv_tests.features.barometer'
                      class: 'BarometerCollectd'
+
     -
         name: components
         order: 3
@@ -481,6 +500,7 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.openstack.tempest.tempest'
                     class: 'TempestFullParallel'
+
             -
                 case_name: tempest_custom
                 project_name: functest
@@ -499,6 +519,7 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.openstack.tempest.tempest'
                     class: 'TempestCustom'
+
             -
                 case_name: rally_full
                 project_name: functest
@@ -537,22 +558,26 @@ tiers:
                 run:
                     module: 'functest.opnfv_tests.vnf.ims.cloudify_ims'
                     class: 'CloudifyIms'
-#          -
-#               case_name: aaa
-#               project_name: functest
-#               criteria: 100
-#               blocking: false
-#               clean_flag: true
-#               description: >-
-#                   Test suite from Parser project.
-#               dependencies:
-#                   installer: ''
-#                   scenario: ''
-#               run:
-#                   module: 'functest.opnfv_tests.vnf.aaa.aaa'
-#                   class: 'AaaVnf'
+
+            -
+                case_name: aaa
+                enabled: false
+                project_name: functest
+                criteria: 100
+                blocking: false
+                clean_flag: true
+                description: >-
+                   Test suite from Parser project.
+                dependencies:
+                    installer: ''
+                    scenario: ''
+                run:
+                    module: 'functest.opnfv_tests.vnf.aaa.aaa'
+                    class: 'AaaVnf'
+
             -
                 case_name: orchestra_ims
+                enabled: false
                 project_name: functest
                 criteria: 100
                 blocking: false
@@ -568,6 +593,7 @@ tiers:
 
             -
                 case_name: opera_vims
+                enabled: false
                 project_name: opera
                 criteria: 100
                 blocking: false
@@ -583,6 +609,7 @@ tiers:
 
             -
                 case_name: vyos_vrouter
+                enabled: false
                 project_name: functest
                 criteria: 100
                 blocking: false
index 44b2725..12562f0 100644 (file)
@@ -47,12 +47,15 @@ class TierBuilder(object):
                 dep = th.Dependency(installer, scenario)
 
                 testcase = th.TestCase(name=dic_testcase['case_name'],
+                                       enabled=dic_testcase.get(
+                                           'enabled', True),
                                        dependency=dep,
                                        criteria=dic_testcase['criteria'],
                                        blocking=dic_testcase['blocking'],
                                        clean_flag=dic_testcase['clean_flag'],
                                        description=dic_testcase['description'])
-                if testcase.is_compatible(self.ci_installer, self.ci_scenario):
+                if (testcase.is_compatible(self.ci_installer, self.ci_scenario)
+                        and testcase.is_enabled()):
                     tier.add_test(testcase)
 
             self.tier_objects.append(tier)
@@ -72,6 +75,12 @@ class TierBuilder(object):
                 return self.tier_objects[i]
         return None
 
+    def get_tier_name(self, test_name):
+        for i in range(0, len(self.tier_objects)):
+            if self.tier_objects[i].is_test(test_name):
+                return self.tier_objects[i].name
+        return None
+
     def get_test(self, test_name):
         for i in range(0, len(self.tier_objects)):
             if self.tier_objects[i].is_test(test_name):
index fe7372a..36ce245 100644 (file)
@@ -105,12 +105,14 @@ class Tier(object):
 class TestCase(object):
 
     def __init__(self, name,
+                 enabled,
                  dependency,
                  criteria,
                  blocking,
                  clean_flag,
                  description=""):
         self.name = name
+        self.enabled = enabled
         self.dependency = dependency
         self.criteria = criteria
         self.blocking = blocking
@@ -138,6 +140,9 @@ class TestCase(object):
     def get_name(self):
         return self.name
 
+    def is_enabled(self):
+        return self.enabled
+
     def get_criteria(self):
         return self.criteria
 
index 8563c92..140c9bb 100644 (file)
@@ -83,7 +83,6 @@ class Feature(base.TestCase):
             ft_utils.logger_test_results(
                 self.project_name, self.case_name,
                 self.result, self.details)
-            self.__logger.info("%s %s", self.project_name, self.result)
         except Exception:  # pylint: disable=broad-except
             self.__logger.exception("%s FAILED", self.project_name)
         self.__logger.info("Test result is stored in '%s'", self.result_file)
index 49fae60..d8b63ef 100644 (file)
@@ -12,6 +12,8 @@
 import logging
 import os
 
+import prettytable
+
 import functest.utils.functest_utils as ft_utils
 
 __author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
@@ -49,14 +51,16 @@ class TestCase(object):
             assert self.case_name
             result = 'PASS' if(self.is_successful(
                 ) == TestCase.EX_OK) else 'FAIL'
-            return ('| {0:<23} | {1:<13} | {2:<10} | {3:<13} |'
-                    '\n{4:-<26}{4:-<16}{4:-<13}{4:-<16}{4}'.format(
-                        self.case_name, self.project_name,
-                        self.get_duration(), result, '+'))
+            msg = prettytable.PrettyTable(
+                header_style='upper', padding_width=5,
+                field_names=['test case', 'project', 'duration',
+                             'result'])
+            msg.add_row([self.case_name, self.project_name,
+                         self.get_duration(), result])
+            return msg.get_string()
         except AssertionError:
             self.__logger.error("We cannot print invalid objects")
-            return '| {0:^68} |\n{1:-<26}{1:-<16}{1:-<13}{1:-<16}{1}'.format(
-                'INVALID OBJECT', '+')
+            return super(TestCase, self).__str__()
 
     def get_duration(self):
         """Return the duration of the test case.
diff --git a/functest/tests/unit/ci/test_generate_report.py b/functest/tests/unit/ci/test_generate_report.py
deleted file mode 100644 (file)
index 2c5ce2e..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-
-import logging
-import unittest
-import urllib2
-
-import mock
-
-from functest.ci import generate_report as gen_report
-from functest.tests.unit import test_utils
-from functest.utils import functest_utils as ft_utils
-from functest.utils.constants import CONST
-
-
-class GenerateReportTesting(unittest.TestCase):
-
-    logging.disable(logging.CRITICAL)
-
-    def test_init(self):
-        test_array = gen_report.init()
-        self.assertEqual(test_array, [])
-
-    @mock.patch('functest.ci.generate_report.urllib2.urlopen',
-                side_effect=urllib2.URLError('no host given'))
-    def test_get_results_from_db_fail(self, mock_method):
-        url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                                   CONST.__getattribute__('BUILD_TAG'))
-        self.assertIsNone(gen_report.get_results_from_db())
-        mock_method.assert_called_once_with(url)
-
-    @mock.patch('functest.ci.generate_report.urllib2.urlopen',
-                return_value={'results': []})
-    def test_get_results_from_db_success(self, mock_method):
-        url = "%s?build_tag=%s" % (ft_utils.get_db_url(),
-                                   CONST.__getattribute__('BUILD_TAG'))
-        self.assertEqual(gen_report.get_results_from_db(), None)
-        mock_method.assert_called_once_with(url)
-
-    def test_get_data(self):
-        self.assertIsInstance(gen_report.get_data({'result': ''}, ''), dict)
-
-    def test_print_line_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        w1 = 'test_print_line'
-        test_str = ("| %s| %s| %s| %s| %s|\n"
-                    % (w1.ljust(gen_report.COL_1_LEN - 1),
-                       ''.ljust(gen_report.COL_2_LEN - 1),
-                       ''.ljust(gen_report.COL_3_LEN - 1),
-                       ''.ljust(gen_report.COL_4_LEN - 1),
-                       ''.ljust(gen_report.COL_5_LEN - 1)))
-        self.assertEqual(gen_report.print_line(w1), test_str)
-
-    def test_print_line_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        w1 = 'test_print_line'
-        test_str = ("| %s| %s| %s| %s|\n"
-                    % (w1.ljust(gen_report.COL_1_LEN - 1),
-                       ''.ljust(gen_report.COL_2_LEN - 1),
-                       ''.ljust(gen_report.COL_3_LEN - 1),
-                       ''.ljust(gen_report.COL_4_LEN - 1)))
-        self.assertEqual(gen_report.print_line(w1), test_str)
-
-    def test_print_line_no_column_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
-        TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
-        TOTAL_LEN += gen_report.COL_5_LEN + 1
-        test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
-        self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
-    def test_print_line_no_column_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        TOTAL_LEN = gen_report.COL_1_LEN + gen_report.COL_2_LEN
-        TOTAL_LEN += gen_report.COL_3_LEN + gen_report.COL_4_LEN + 2
-        test_str = ("| %s|\n" % 'test'.ljust(TOTAL_LEN))
-        self.assertEqual(gen_report.print_line_no_columns('test'), test_str)
-
-    def test_print_separator_with_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', True)
-        test_str = ("+" + "=" * gen_report.COL_1_LEN +
-                    "+" + "=" * gen_report.COL_2_LEN +
-                    "+" + "=" * gen_report.COL_3_LEN +
-                    "+" + "=" * gen_report.COL_4_LEN +
-                    "+" + "=" * gen_report.COL_5_LEN)
-        test_str += '+\n'
-        self.assertEqual(gen_report.print_separator(), test_str)
-
-    def test_print_separator_without_ci_run(self):
-        CONST.__setattr__('IS_CI_RUN', False)
-        test_str = ("+" + "=" * gen_report.COL_1_LEN +
-                    "+" + "=" * gen_report.COL_2_LEN +
-                    "+" + "=" * gen_report.COL_3_LEN +
-                    "+" + "=" * gen_report.COL_4_LEN)
-        test_str += "+\n"
-        self.assertEqual(gen_report.print_separator(), test_str)
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_ci_run(self, mock_method):
-        CONST.__setattr__('IS_CI_RUN', True)
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('URL'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_ci_loop(self, mock_method):
-        CONST.__setattr__('CI_LOOP', 'daily')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('CI LOOP'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_scenario(self, mock_method):
-        CONST.__setattr__('DEPLOY_SCENARIO', 'test_scenario')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.SubstrMatch('SCENARIO'))
-
-    @mock.patch('functest.ci.generate_report.logger.info')
-    def test_main_with_build_tag(self, mock_method):
-        CONST.__setattr__('BUILD_TAG', 'test_build_tag')
-        gen_report.main()
-        mock_method.assert_called_once_with(test_utils.
-                                            SubstrMatch('BUILD TAG'))
-
-
-if __name__ == "__main__":
-    unittest.main(verbosity=2)
index ef08282..d005239 100644 (file)
@@ -70,17 +70,6 @@ class RunTestsTesting(unittest.TestCase):
         run_tests.cleanup()
         self.assertTrue(mock_os_clean.called)
 
-    def test_update_test_info(self):
-        run_tests.GlobalVariables.EXECUTED_TEST_CASES = [self.test]
-        run_tests.update_test_info('test_name',
-                                   'test_result',
-                                   'test_duration')
-        exp = self.test
-        exp.update({"result": 'test_result',
-                    "duration": 'test_duration'})
-        self.assertEqual(run_tests.GlobalVariables.EXECUTED_TEST_CASES,
-                         [exp])
-
     def test_get_run_dict_if_defined_default(self):
         mock_obj = mock.Mock()
         with mock.patch('functest.ci.run_tests.'
@@ -148,10 +137,8 @@ class RunTestsTesting(unittest.TestCase):
             mock.patch('functest.ci.run_tests.source_rc_file'), \
             mock.patch('functest.ci.run_tests.generate_os_snapshot'), \
             mock.patch('functest.ci.run_tests.cleanup'), \
-            mock.patch('functest.ci.run_tests.update_test_info'), \
             mock.patch('functest.ci.run_tests.get_run_dict',
                        return_value=test_run_dict), \
-            mock.patch('functest.ci.run_tests.generate_report.main'), \
                 self.assertRaises(run_tests.BlockingTestFailed) as context:
             run_tests.GlobalVariables.CLEAN_FLAG = True
             run_tests.run_test(mock_test, 'tier_name')
@@ -176,21 +163,17 @@ class RunTestsTesting(unittest.TestCase):
 
     @mock.patch('functest.ci.run_tests.logger.info')
     def test_run_all_default(self, mock_logger_info):
-        with mock.patch('functest.ci.run_tests.run_tier') as mock_method, \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
-                mock.patch('functest.ci.run_tests.generate_report.main'):
+        with mock.patch('functest.ci.run_tests.run_tier') as mock_method:
             CONST.__setattr__('CI_LOOP', 'test_ci_loop')
             run_tests.run_all(self.tiers)
             mock_method.assert_any_call(self.tier)
             self.assertTrue(mock_logger_info.called)
 
     @mock.patch('functest.ci.run_tests.logger.info')
-    def test_run_all__missing_tier(self, mock_logger_info):
-        with mock.patch('functest.ci.run_tests.generate_report.init'), \
-                mock.patch('functest.ci.run_tests.generate_report.main'):
-            CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
-            run_tests.run_all(self.tiers)
-            self.assertTrue(mock_logger_info.called)
+    def test_run_all_missing_tier(self, mock_logger_info):
+        CONST.__setattr__('CI_LOOP', 'loop_re_not_available')
+        run_tests.run_all(self.tiers)
+        self.assertTrue(mock_logger_info.called)
 
     def test_main_failed(self):
         kwargs = {'test': 'test_name', 'noclean': True, 'report': True}
@@ -221,7 +204,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_tier') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -234,7 +216,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_test') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -248,7 +229,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.run_all') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_OK)
@@ -262,7 +242,6 @@ class RunTestsTesting(unittest.TestCase):
         with mock.patch('functest.ci.run_tests.tb.TierBuilder',
                         return_value=mock_obj), \
             mock.patch('functest.ci.run_tests.source_rc_file'), \
-            mock.patch('functest.ci.run_tests.generate_report.init'), \
                 mock.patch('functest.ci.run_tests.logger.debug') as m:
             self.assertEqual(run_tests.main(**kwargs),
                              run_tests.Result.EX_ERROR)
index 438fa7c..feaf33a 100644 (file)
@@ -22,6 +22,7 @@ class TierBuilderTesting(unittest.TestCase):
                            'scenario': 'test_scenario'}
 
         self.testcase = {'dependencies': self.dependency,
+                         'enabled': 'true',
                          'case_name': 'test_name',
                          'criteria': 'test_criteria',
                          'blocking': 'test_blocking',
@@ -78,6 +79,13 @@ class TierBuilderTesting(unittest.TestCase):
         self.assertEqual(self.tierbuilder.get_tests('test_tier2'),
                          None)
 
+    def test_get_tier_name_ok(self):
+        self.assertEqual(self.tierbuilder.get_tier_name('test_name'),
+                         'test_tier')
+
+    def test_get_tier_name_ko(self):
+        self.assertEqual(self.tierbuilder.get_tier_name('test_name2'), None)
+
 
 if __name__ == "__main__":
     unittest.main(verbosity=2)
index 21df409..2800627 100644 (file)
@@ -32,6 +32,7 @@ class TierHandlerTesting(unittest.TestCase):
                                       'test_ci_loop',
                                       description='test_desc')
         self.testcase = tier_handler.TestCase('test_name',
+                                              'true',
                                               self.mock_depend,
                                               'test_criteria',
                                               'test_blocking',
@@ -116,6 +117,10 @@ class TierHandlerTesting(unittest.TestCase):
         self.assertEqual(self.tier.get_name(),
                          'test_tier')
 
+    def test_testcase_is_enabled(self):
+        self.assertEqual(self.testcase.is_enabled(),
+                         'true')
+
     def test_testcase_get_criteria(self):
         self.assertEqual(self.tier.get_order(),
                          'test_order')
index b25ce22..7222967 100644 (file)
@@ -20,7 +20,6 @@ __author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"
 
 
 class TestCaseTesting(unittest.TestCase):
-
     """The class testing TestCase."""
     # pylint: disable=missing-docstring,too-many-public-methods
 
@@ -191,11 +190,13 @@ class TestCaseTesting(unittest.TestCase):
 
     def test_str_project_name_ko(self):
         self.test.project_name = None
-        self.assertIn("INVALID OBJECT", str(self.test))
+        self.assertIn("<functest.core.testcase.TestCase object at",
+                      str(self.test))
 
     def test_str_case_name_ko(self):
         self.test.case_name = None
-        self.assertIn("INVALID OBJECT", str(self.test))
+        self.assertIn("<functest.core.testcase.TestCase object at",
+                      str(self.test))
 
     def test_str_pass(self):
         duration = '01:01'
index e709220..976deef 100644 (file)
@@ -19,7 +19,7 @@ pexpect==4.0
 requests>=2.8.0
 robotframework==2.9.1
 robotframework-requests==0.3.8
-robotframework-sshlibrary==2.1.1
+robotframework-sshlibrary==2.1.3
 configObj==5.0.6
 Flask==0.10.1
 xmltodict==0.9.2
@@ -33,3 +33,4 @@ click==6.6
 openbaton-cli==2.2.1-beta7
 mock==1.3.0
 iniparse==0.4
+PrettyTable>=0.7.1,<0.8 # BSD
index 471e9c3..76e475d 100644 (file)
@@ -23,6 +23,7 @@ pyyaml==3.10
 requests==2.8.0
 robotframework==2.9.1
 robotframework-requests==0.3.8
-robotframework-sshlibrary==2.1.1
+robotframework-sshlibrary==2.1.3
 subprocess32==3.2.7
 virtualenv==15.1.0
+PrettyTable>=0.7.1,<0.8 # BSD