Extend rally test case exclusion feature
diff --git a/testcases/OpenStack/rally/run_rally-cert.py b/testcases/OpenStack/rally/run_rally-cert.py
index 279bcde..8b8adce 100755
--- a/testcases/OpenStack/rally/run_rally-cert.py
+++ b/testcases/OpenStack/rally/run_rally-cert.py
 #
 """ tests configuration """
 
-import argparse
 import json
 import os
 import re
 import subprocess
 import time
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as functest_utils
-import functest.utils.openstack_utils as os_utils
+
+import argparse
 import iniparse
 import yaml
 
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
 
 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
@@ -70,21 +71,16 @@ else:
 """ logging configuration """
 logger = ft_logger.Logger("run_rally").getLogger()
 
-REPO_PATH = os.environ['repos_dir'] + '/functest/'
-if not os.path.exists(REPO_PATH):
-    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
-    exit(-1)
-
-
-with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
-    functest_yaml = yaml.safe_load(f)
-f.close()
 
 HOME = os.environ['HOME'] + "/"
-RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
-    "directories").get("dir_rally")
+RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
+            ft_utils.get_functest_config('general.directories.dir_rally')
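+# scenario/sanity and scenario/full hold the mode-specific scenario
+# variants; var/ receives the blacklist-filtered copies that rally runs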
+SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
+FULL_MODE_DIR = RALLY_DIR + "scenario/full"
 TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
 SUPPORT_DIR = RALLY_DIR + "scenario/support"
+TEMP_DIR = RALLY_DIR + "var"
+BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"
 
 FLAVOR_NAME = "m1.tiny"
 USERS_AMOUNT = 2
@@ -92,25 +88,27 @@ TENANTS_AMOUNT = 3
 ITERATIONS_AMOUNT = 10
 CONCURRENCY = 4
 
-RESULTS_DIR = functest_yaml.get("general").get("directories").get(
-    "dir_rally_res")
-TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
-    "dir_results") + '/tempest/tempest.conf'
-TEST_DB = functest_yaml.get("results").get("test_db_url")
-
-PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
-PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
-PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
-ROUTER_NAME = functest_yaml.get("rally").get("router_name")
-
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
-    "image_name")
-GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
-    "image_file_name")
-GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
-    "image_disk_format")
-GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
-    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+RESULTS_DIR = \
+    ft_utils.get_functest_config('general.directories.dir_rally_res')
+TEMPEST_CONF_FILE = \
+    ft_utils.get_functest_config('general.directories.dir_results') + \
+    '/tempest/tempest.conf'
+TEST_DB = ft_utils.get_functest_config('results.test_db_url')
+
+PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
+PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
+PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
+ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')
+
+GLANCE_IMAGE_NAME = \
+    ft_utils.get_functest_config('general.openstack.image_name')
+GLANCE_IMAGE_FILENAME = \
+    ft_utils.get_functest_config('general.openstack.image_file_name')
+GLANCE_IMAGE_FORMAT = \
+    ft_utils.get_functest_config('general.openstack.image_disk_format')
+GLANCE_IMAGE_PATH = \
+    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
+    "/" + GLANCE_IMAGE_FILENAME
 
 CINDER_VOLUME_TYPE_NAME = "volume_test"
 
@@ -167,6 +165,7 @@ def build_task_args(test_file_name):
     task_args['image_name'] = GLANCE_IMAGE_NAME
     task_args['flavor_name'] = FLAVOR_NAME
     task_args['glance_image_location'] = GLANCE_IMAGE_PATH
+    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
     task_args['tmpl_dir'] = TEMPLATE_DIR
     task_args['sup_dir'] = SUPPORT_DIR
     task_args['users_amount'] = USERS_AMOUNT
@@ -176,10 +175,8 @@ def build_task_args(test_file_name):
     task_args['concurrency'] = CONCURRENCY
 
     if args.sanity:
-        task_args['full_mode'] = False
         task_args['smoke'] = True
     else:
-        task_args['full_mode'] = True
         task_args['smoke'] = args.smoke
 
     ext_net = os_utils.get_external_net(neutron_client)
@@ -190,7 +187,6 @@ def build_task_args(test_file_name):
 
     net_id = network_dict['net_id']
     task_args['netid'] = str(net_id)
-    task_args['live_migration'] = live_migration_supported()
 
     auth_url = os.getenv('OS_AUTH_URL')
     if auth_url is not None:
@@ -276,6 +272,113 @@ def get_cmd_output(proc):
     return result
 
 
+def excl_scenario():
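+    """Return the tests blacklisted for the current INSTALLER_TYPE /
+    DEPLOY_SCENARIO pair (empty if nothing matches or the blacklist
+    cannot be read).
+
+    BLACKLIST_FILE is expected to be YAML shaped as in this sketch
+    (keys taken from the parsing below; the values are illustrative):
+
+        scenario:
+            -
+                scenarios:
+                    - <a DEPLOY_SCENARIO value>
+                installers:
+                    - <an INSTALLER_TYPE value>
+                tests:
+                    - <regex matched against scenario file lines>
+    """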
+    black_tests = []
+
+    try:
+        with open(BLACKLIST_FILE, 'r') as black_list_file:
+            black_list_yaml = yaml.safe_load(black_list_file)
+
+        installer_type = os.getenv('INSTALLER_TYPE')
+        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
+        if installer_type and deploy_scenario:
+            if 'scenario' in black_list_yaml:
+                for item in black_list_yaml['scenario']:
+                    scenarios = item['scenarios']
+                    installers = item['installers']
+                    if (deploy_scenario in scenarios and
+                            installer_type in installers):
+                        black_tests.extend(item['tests'])
+    except Exception:
+        logger.debug("Scenario exclusion not applied.")
+
+    return black_tests
+
+
+def excl_func():
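+    """Return the tests blacklisted because a required functionality is
+    unavailable on the deployment (currently only live migration).
+
+    Read from the 'functionality' section of BLACKLIST_FILE, expected
+    to be shaped as in this sketch (the values are illustrative):
+
+        functionality:
+            -
+                functions:
+                    - no_live_migration
+                tests:
+                    - <regex matched against scenario file lines>
+    """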
+    black_tests = []
+    func_list = []
+
+    try:
+        with open(BLACKLIST_FILE, 'r') as black_list_file:
+            black_list_yaml = yaml.safe_load(black_list_file)
+
+        if not live_migration_supported():
+            func_list.append("no_live_migration")
+
+        if 'functionality' in black_list_yaml:
+            for item in black_list_yaml['functionality']:
+                functions = item['functions']
+                for func in func_list:
+                    if func in functions:
+                        black_tests.extend(item['tests'])
+    except Exception:
+        logger.debug("Functionality exclusion not applied.")
+
+    return black_tests
+
+
+def apply_blacklist(case_file_name, result_file_name):
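+    """Write a copy of case_file_name to result_file_name, dropping
+    every block that starts at a line matching a blacklisted pattern
+    and runs to the next blank line."""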
+    logger.debug("Applying blacklist...")
+    cases_file = open(case_file_name, 'r')
+    result_file = open(result_file_name, 'w')
+
+    black_tests = list(set(excl_func() + excl_scenario()))
+
+    include = True
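+    # 'include' turns False on a line matching a blacklisted pattern and
+    # True again at the next blank line, so the whole indented block under
+    # a match is dropped; the for/else writes a line only when no pattern
+    # matched (the else branch runs when the loop does not break).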
+    for cases_line in cases_file:
+        if include:
+            for black_tests_line in black_tests:
+                if re.search(black_tests_line, cases_line.strip().rstrip(':')):
+                    include = False
+                    break
+            else:
+                result_file.write(cases_line)
+        else:
+            if cases_line.isspace():
+                include = True
+
+    cases_file.close()
+    result_file.close()
+
+
+def prepare_test_list(test_name):
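+    """Locate the scenario file for test_name (common scenario directory
+    first, then the sanity or full variant), apply the blacklist and
+    return the path of the filtered copy written under TEMP_DIR."""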
+    scenario_file_name = \
+        '{}scenario/opnfv-{}.yaml'.format(RALLY_DIR, test_name)
+    if not os.path.exists(scenario_file_name):
+        if args.sanity:
+            scenario_file_name = \
+                '{}/opnfv-{}.yaml'.format(SANITY_MODE_DIR, test_name)
+        else:
+            scenario_file_name = \
+                '{}/opnfv-{}.yaml'.format(FULL_MODE_DIR, test_name)
+        if not os.path.exists(scenario_file_name):
+            logger.error("The scenario '%s' does not exist."
+                         % scenario_file_name)
+            exit(-1)
+
+    logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
+    test_file_name = '{}/opnfv-{}.yaml'.format(TEMP_DIR, test_name)
+
+    if not os.path.exists(TEMP_DIR):
+        os.makedirs(TEMP_DIR)
+
+    apply_blacklist(scenario_file_name, test_file_name)
+    return test_file_name
+
+
+def file_is_empty(file_name):
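+    """Return True if file_name is missing or zero bytes long."""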
+    try:
+        if os.stat(file_name).st_size > 0:
+            return False
+    except OSError:
+        pass
+
+    return True
+
+
 def run_task(test_name):
     #
     # the "main" function of the script who launch rally for a task
@@ -291,13 +394,10 @@ def run_task(test_name):
         logger.error("Task file '%s' does not exist." % task_file)
         exit(-1)
 
-    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
-                                              test_name)
-    if not os.path.exists(test_file_name):
-        logger.error("The scenario '%s' does not exist." % test_file_name)
-        exit(-1)
-
-    logger.debug('Scenario fetched from : {}'.format(test_file_name))
+    file_name = prepare_test_list(test_name)
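+    # an empty file here means every test of the scenario was blacklisted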
+    if file_is_empty(file_name):
+        logger.info('No tests for scenario "{}"'.format(test_name))
+        return
 
     cmd_line = ("rally task start --abort-on-sla-failure " +
                 "--task {} ".format(task_file) +
@@ -324,7 +424,7 @@ def run_task(test_name):
 
     # check for result directory and create it otherwise
     if not os.path.exists(RESULTS_DIR):
-        logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR))
+        logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
         os.makedirs(RESULTS_DIR)
 
     # write html report file
@@ -360,13 +460,12 @@ def run_task(test_name):
     if args.report:
         stop_time = time.time()
         logger.debug("Push Rally detailed results into DB")
-        functest_utils.push_results_to_db("functest",
-                                          "Rally_details",
-                                          logger,
-                                          start_time,
-                                          stop_time,
-                                          status,
-                                          json_data)
+        ft_utils.push_results_to_db("functest",
+                                    "Rally_details",
+                                    start_time,
+                                    stop_time,
+                                    status,
+                                    json_data)
 
 
 def main():
@@ -408,22 +507,12 @@ def main():
         exit(-1)
 
     logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
-    network_dict = os_utils.create_network_full(neutron_client,
-                                                PRIVATE_NET_NAME,
-                                                PRIVATE_SUBNET_NAME,
-                                                ROUTER_NAME,
-                                                PRIVATE_SUBNET_CIDR)
+    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
+                                                       PRIVATE_SUBNET_NAME,
+                                                       ROUTER_NAME,
+                                                       PRIVATE_SUBNET_CIDR)
     if not network_dict:
-        logger.error("Failed to create network...")
-        exit(-1)
-    else:
-        if not os_utils.update_neutron_net(neutron_client,
-                                           network_dict['net_id'],
-                                           shared=True):
-            logger.error("Failed to update network...")
-            exit(-1)
-        else:
-            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)
+        exit(-1)
 
     if args.test_name == "all":
         for test_name in tests:
@@ -475,7 +564,12 @@ def main():
     total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
     total_duration_str2 = "{0:<10}".format(total_duration_str)
     total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
+
+    if SUMMARY:
+        success_rate = total_success / len(SUMMARY)
+    else:
+        success_rate = 100
+    success_rate = "{:0.2f}".format(success_rate)
     success_rate_str = "{0:<10}".format(str(success_rate) + '%')
     report += "+===================+============+===============+===========+"
     report += "\n"
@@ -495,7 +589,7 @@ def main():
         case_name = "rally_full"
 
     # Evaluation of the success criteria
-    status = functest_utils.check_success_rate(case_name, success_rate)
+    status = ft_utils.check_success_rate(case_name, success_rate)
 
     exit_code = -1
     if status == "PASS":
@@ -503,13 +597,12 @@ def main():
 
     if args.report:
         logger.debug("Pushing Rally summary into DB...")
-        functest_utils.push_results_to_db("functest",
-                                          case_name,
-                                          logger,
-                                          start_time,
-                                          stop_time,
-                                          status,
-                                          payload)
+        ft_utils.push_results_to_db("functest",
+                                    case_name,
+                                    start_time,
+                                    stop_time,
+                                    status,
+                                    payload)
     if args.noclean:
         exit(exit_code)