Extend rally test case exclusion feature
diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py
index 536b4a5..8b8adce 100755
--- a/testcases/OpenStack/rally/run_rally-cert.py
+++ b/testcases/OpenStack/rally/run_rally-cert.py
@@ -26,7 +26,7 @@ import iniparse
 import yaml
 
 import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
+import functest.utils.functest_utils as ft_utils
 import functest.utils.openstack_utils as os_utils
 
 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
@@ -72,11 +72,11 @@ else:
 logger = ft_logger.Logger("run_rally").getLogger()
 
 
-functest_yaml = functest_utils.get_functest_yaml()
-
 HOME = os.environ['HOME'] + "/"
-RALLY_DIR = functest_utils.FUNCTEST_REPO + '/' + \
-            functest_yaml.get("general").get("directories").get("dir_rally")
+RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
+            ft_utils.get_functest_config('general.directories.dir_rally')
+SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
+FULL_MODE_DIR = RALLY_DIR + "scenario/full"
 TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
 SUPPORT_DIR = RALLY_DIR + "scenario/support"
 TEMP_DIR = RALLY_DIR + "var"
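The nested `functest_yaml.get(...).get(...)` chains are replaced by `ft_utils.get_functest_config()`, which takes a dotted key path. A minimal sketch of what such a dotted-path lookup boils down to (illustrative only, not the actual functest_utils implementation):

```python
import yaml

def get_config(dotted_key, yaml_path):
    # Walk the parsed config one key at a time, e.g.
    # 'general.directories.dir_rally' -> ['general', 'directories', 'dir_rally']
    with open(yaml_path) as f:
        value = yaml.safe_load(f)
    for key in dotted_key.split('.'):
        value = value[key]  # a typoed key fails loudly with KeyError
    return value
```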
@@ -88,25 +88,27 @@ TENANTS_AMOUNT = 3
 ITERATIONS_AMOUNT = 10
 CONCURRENCY = 4
 
-RESULTS_DIR = functest_yaml.get("general").get("directories").get(
-    "dir_rally_res")
-TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
-    "dir_results") + '/tempest/tempest.conf'
-TEST_DB = functest_yaml.get("results").get("test_db_url")
-
-PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
-PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
-PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
-ROUTER_NAME = functest_yaml.get("rally").get("router_name")
-
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
-    "image_name")
-GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
-    "image_file_name")
-GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
-    "image_disk_format")
-GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
-    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+RESULTS_DIR = \
+    ft_utils.get_functest_config('general.directories.dir_rally_res')
+TEMPEST_CONF_FILE = \
+    ft_utils.get_functest_config('general.directories.dir_results') + \
+    '/tempest/tempest.conf'
+TEST_DB = ft_utils.get_functest_config('results.test_db_url')
+
+PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
+PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
+PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
+ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')
+
+GLANCE_IMAGE_NAME = \
+    ft_utils.get_functest_config('general.openstack.image_name')
+GLANCE_IMAGE_FILENAME = \
+    ft_utils.get_functest_config('general.openstack.image_file_name')
+GLANCE_IMAGE_FORMAT = \
+    ft_utils.get_functest_config('general.openstack.image_disk_format')
+GLANCE_IMAGE_PATH = \
+    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
+    "/" + GLANCE_IMAGE_FILENAME
 
 CINDER_VOLUME_TYPE_NAME = "volume_test"
 
@@ -173,10 +175,8 @@ def build_task_args(test_file_name):
     task_args['concurrency'] = CONCURRENCY
 
     if args.sanity:
-        task_args['full_mode'] = False
         task_args['smoke'] = True
     else:
-        task_args['full_mode'] = True
         task_args['smoke'] = args.smoke
 
     ext_net = os_utils.get_external_net(neutron_client)
@@ -187,7 +187,6 @@ def build_task_args(test_file_name):
 
     net_id = network_dict['net_id']
     task_args['netid'] = str(net_id)
-    task_args['live_migration'] = live_migration_supported()
 
     auth_url = os.getenv('OS_AUTH_URL')
     if auth_url is not None:
@@ -273,36 +272,66 @@ def get_cmd_output(proc):
     return result
 
 
-def apply_blacklist(case_file_name, result_file_name):
-    logger.debug("Applying blacklist...")
-    cases_file = open(case_file_name, 'r')
-    result_file = open(result_file_name, 'w')
+def excl_scenario():
     black_tests = []
 
     try:
+        with open(BLACKLIST_FILE, 'r') as black_list_file:
+            black_list_yaml = yaml.safe_load(black_list_file)
+
         installer_type = os.getenv('INSTALLER_TYPE')
         deploy_scenario = os.getenv('DEPLOY_SCENARIO')
         if (bool(installer_type) * bool(deploy_scenario)):
-            # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the file
-            with open(BLACKLIST_FILE, 'r') as black_list_file:
-                black_list_yaml = yaml.safe_load(black_list_file)
-
-            for item in black_list_yaml:
-                scenarios = item['scenarios']
-                installers = item['installers']
-                if (deploy_scenario in scenarios and
-                        installer_type in installers):
-                    tests = item['tests']
-                    black_tests.extend(tests)
+            if 'scenario' in black_list_yaml.keys():
+                for item in black_list_yaml['scenario']:
+                    scenarios = item['scenarios']
+                    installers = item['installers']
+                    if (deploy_scenario in scenarios and
+                            installer_type in installers):
+                        tests = item['tests']
+                        black_tests.extend(tests)
     except:
-        black_tests = []
-        logger.debug("Blacklisting not applied.")
+        logger.debug("Scenario exclusion not applied.")
+
+    return black_tests
+
+
+def excl_func():
+    black_tests = []
+    func_list = []
+
+    try:
+        with open(BLACKLIST_FILE, 'r') as black_list_file:
+            black_list_yaml = yaml.safe_load(black_list_file)
+
+        if not live_migration_supported():
+            func_list.append("no_live_migration")
+
+        if 'functionality' in black_list_yaml.keys():
+            for item in black_list_yaml['functionality']:
+                functions = item['functions']
+                for func in func_list:
+                    if func in functions:
+                        tests = item['tests']
+                        black_tests.extend(tests)
+    except:
+        logger.debug("Functionality exclusion not applied.")
+
+    return black_tests
+
+
+def apply_blacklist(case_file_name, result_file_name):
+    logger.debug("Applying blacklist...")
+    cases_file = open(case_file_name, 'r')
+    result_file = open(result_file_name, 'w')
+
+    black_tests = list(set(excl_func() + excl_scenario()))
 
     include = True
     for cases_line in cases_file:
         if include:
             for black_tests_line in black_tests:
-                if black_tests_line == cases_line.strip().rstrip(':'):
+                if re.search(black_tests_line, cases_line.strip().rstrip(':')):
                     include = False
                     break
             else:
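Both `excl_scenario()` and `excl_func()` parse the same `BLACKLIST_FILE`, now split into two top-level sections: `scenario` entries are matched against the `INSTALLER_TYPE` and `DEPLOY_SCENARIO` environment variables, while `functionality` entries are matched against detected capabilities (currently only `no_live_migration`, which replaces the old `live_migration` task argument removed above). Also note that `apply_blacklist()` switches from string equality to `re.search()`, so each `tests` entry is now a regular expression matched anywhere in the test case name; a broad pattern like `Quotas` would exclude every `Quotas.*` case. An illustrative blacklist (entry values are examples, not the shipped file):

```yaml
scenario:
    -
        scenarios:
            - os-odl_l2-bgpvpn-ha
        installers:
            - fuel
        tests:
            - NeutronNetworks.create_and_delete_routers
functionality:
    -
        functions:
            - no_live_migration
        tests:
            - NovaServers.boot_and_live_migrate_server
```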
@@ -319,8 +348,16 @@ def prepare_test_list(test_name):
     scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                   test_name)
     if not os.path.exists(scenario_file_name):
-        logger.info("The scenario '%s' does not exist." % scenario_file_name)
-        exit(-1)
+        if args.sanity:
+            scenario_file_name = '{}opnfv-{}.yaml'.format(SANITY_MODE_DIR +
+                                                          "/", test_name)
+        else:
+            scenario_file_name = '{}opnfv-{}.yaml'.format(FULL_MODE_DIR +
+                                                          "/", test_name)
+        if not os.path.exists(scenario_file_name):
+            logger.info("The scenario '%s' does not exist."
+                        % scenario_file_name)
+            exit(-1)
 
     logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
     test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)
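With this change, `prepare_test_list()` resolves the scenario file in two steps: the common `scenario/` directory first, then a per-mode subdirectory, so a test case can ship different scenario definitions for sanity and full runs. The expected layout, following the `opnfv-<test_name>.yaml` naming convention already used above:

```
scenario/opnfv-<test_name>.yaml          shared by both modes
scenario/sanity/opnfv-<test_name>.yaml   fallback when --sanity is set
scenario/full/opnfv-<test_name>.yaml     fallback in full mode
```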
@@ -329,6 +366,17 @@ def prepare_test_list(test_name):
         os.makedirs(TEMP_DIR)
 
     apply_blacklist(scenario_file_name, test_file_name)
+    return test_file_name
+
+
+def file_is_empty(file_name):
+    try:
+        if os.stat(file_name).st_size > 0:
+            return False
+    except:
+        pass
+
+    return True
 
 
 def run_task(test_name):
@@ -346,7 +394,10 @@ def run_task(test_name):
         logger.error("Task file '%s' does not exist." % task_file)
         exit(-1)
 
-    prepare_test_list(test_name)
+    file_name = prepare_test_list(test_name)
+    if file_is_empty(file_name):
+        logger.info('No tests for scenario "{}"'.format(test_name))
+        return
 
     cmd_line = ("rally task start --abort-on-sla-failure " +
                 "--task {} ".format(task_file) +
@@ -373,7 +424,7 @@ def run_task(test_name):
 
     # check for result directory and create it otherwise
     if not os.path.exists(RESULTS_DIR):
-        logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR))
+        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
         os.makedirs(RESULTS_DIR)
 
     # write html report file
@@ -409,12 +460,12 @@ def run_task(test_name):
     if args.report:
         stop_time = time.time()
         logger.debug("Push Rally detailed results into DB")
-        functest_utils.push_results_to_db("functest",
-                                          "Rally_details",
-                                          start_time,
-                                          stop_time,
-                                          status,
-                                          json_data)
+        ft_utils.push_results_to_db("functest",
+                                    "Rally_details",
+                                    start_time,
+                                    stop_time,
+                                    status,
+                                    json_data)
 
 
 def main():
@@ -513,7 +564,12 @@ def main():
     total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
     total_duration_str2 = "{0:<10}".format(total_duration_str)
     total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
+
+    if len(SUMMARY):
+        success_rate = total_success / len(SUMMARY)
+    else:
+        success_rate = 100
+    success_rate = "{:0.2f}".format(success_rate)
     success_rate_str = "{0:<10}".format(str(success_rate) + '%')
     report += "+===================+============+===============+===========+"
     report += "\n"
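Now that whole scenarios can be skipped, `SUMMARY` can legitimately end up empty, and the old unguarded division raised `ZeroDivisionError`. The guard reports 100% in that case, which is the convention this commit chooses for "nothing ran, nothing failed". A minimal self-contained illustration:

```python
SUMMARY = []    # every scenario was excluded or skipped
total_success = 0
# Mirrors the guarded computation added above.
rate = total_success / len(SUMMARY) if len(SUMMARY) else 100
print("{:0.2f}%".format(rate))  # -> "100.00%"
```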
@@ -533,7 +589,7 @@ def main():
         case_name = "rally_full"
 
     # Evaluation of the success criteria
-    status = functest_utils.check_success_rate(case_name, success_rate)
+    status = ft_utils.check_success_rate(case_name, success_rate)
 
     exit_code = -1
     if status == "PASS":
@@ -541,12 +597,12 @@ def main():
 
     if args.report:
         logger.debug("Pushing Rally summary into DB...")
-        functest_utils.push_results_to_db("functest",
-                                          case_name,
-                                          start_time,
-                                          stop_time,
-                                          status,
-                                          payload)
+        ft_utils.push_results_to_db("functest",
+                                    case_name,
+                                    start_time,
+                                    stop_time,
+                                    status,
+                                    payload)
     if args.noclean:
         exit(exit_code)