Refactor Rally to adopt OO
author helenyao <yaohelan@huawei.com>
Mon, 23 Jan 2017 01:51:25 +0000 (20:51 -0500)
committer helenyao <yaohelan@huawei.com>
Wed, 25 Jan 2017 09:02:06 +0000 (04:02 -0500)
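
Replace the procedural run_rally-cert.py script with an object-oriented
rally.py module: a RallyBase class plus RallySanity and RallyFull
subclasses, registered in testcases.yaml through run: module/class
entries. The matching cases are dropped from exec_test.sh, and the
credential debug logging is removed from prepare_env.py and run_tests.py.
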
JIRA: FUNCTEST-541

Change-Id: Ife04f58dc31ca9ba6316cc31577f81a6bf0e0405
Signed-off-by: helenyao <yaohelan@huawei.com>
functest/ci/exec_test.sh
functest/ci/prepare_env.py
functest/ci/run_tests.py
functest/ci/testcases.yaml
functest/opnfv_tests/openstack/rally/__init__.py [new file with mode: 0644]
functest/opnfv_tests/openstack/rally/rally.py [new file with mode: 0644]
functest/opnfv_tests/openstack/rally/run_rally-cert.py [deleted file]

index 7c96d69..54a7c62 100755 (executable)
@@ -93,13 +93,6 @@ function run_test(){
         "vims")
             python ${FUNCTEST_TEST_DIR}/vnf/ims/vims.py $clean_flag $report
         ;;
-        "rally_full")
-            python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py $clean_flag all $report
-        ;;
-        "rally_sanity")
-            python ${FUNCTEST_TEST_DIR}/openstack/rally/run_rally-cert.py \
-                $clean_flag --sanity all $report
-        ;;
         "onos")
             python ${FUNCTEST_TEST_DIR}/sdn/onos/teston/onos.py
         ;;
index 74c751a..8bbdf18 100755 (executable)
@@ -184,11 +184,6 @@ def source_rc_file():
                 CONST.OS_TENANT_NAME = value
             elif key == 'OS_PASSWORD':
                 CONST.OS_PASSWORD = value
-    logger.debug("Used credentials: %s" % str)
-    logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
-    logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
-    logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
-    logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
 
 
 def patch_config_file():
index a5f1ab9..ef08001 100755 (executable)
@@ -93,10 +93,6 @@ def source_rc_file():
             elif key == 'OS_PASSWORD':
                 ft_constants.OS_PASSWORD = value
                 CONST.OS_PASSWORD = value
-    logger.debug("OS_AUTH_URL:%s" % CONST.OS_AUTH_URL)
-    logger.debug("OS_USERNAME:%s" % CONST.OS_USERNAME)
-    logger.debug("OS_TENANT_NAME:%s" % CONST.OS_TENANT_NAME)
-    logger.debug("OS_PASSWORD:%s" % CONST.OS_PASSWORD)
 
 
 def generate_os_snapshot():
index ede0828..7859692 100755 (executable)
@@ -81,6 +81,9 @@ tiers:
                 dependencies:
                     installer: ''
                     scenario: '^((?!bgpvpn).)*$'
+                run:
+                    module: 'functest.opnfv_tests.openstack.rally.rally'
+                    class: 'RallySanity'
 
             -
                 name: odl
@@ -325,6 +328,9 @@ tiers:
                 dependencies:
                     installer: '^((?!netvirt).)*$'
                     scenario: ''
+                run:
+                    module: 'functest.opnfv_tests.openstack.rally.rally'
+                    class: 'RallyFull'
 
     -
         name: vnf
diff --git a/functest/opnfv_tests/openstack/rally/__init__.py b/functest/opnfv_tests/openstack/rally/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/functest/opnfv_tests/openstack/rally/rally.py b/functest/opnfv_tests/openstack/rally/rally.py
new file mode 100644 (file)
index 0000000..de55311
--- /dev/null
@@ -0,0 +1,559 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+
+import json
+import os
+import re
+import subprocess
+import time
+
+import iniparse
+import yaml
+
+from functest.core import testcase_base
+from functest.utils.constants import CONST
+import functest.utils.functest_logger as ft_logger
+import functest.utils.functest_utils as ft_utils
+import functest.utils.openstack_utils as os_utils
+
+logger = ft_logger.Logger('Rally').getLogger()
+
+
+class RallyBase(testcase_base.TestcaseBase):
+    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
+             'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
+    GLANCE_IMAGE_NAME = CONST.openstack_image_name
+    GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
+    GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
+                                     GLANCE_IMAGE_FILENAME)
+    GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
+    FLAVOR_NAME = "m1.tiny"
+
+    RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
+    RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
+    TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
+    SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
+    USERS_AMOUNT = 2
+    TENANTS_AMOUNT = 3
+    ITERATIONS_AMOUNT = 10
+    CONCURRENCY = 4
+    RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
+    TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
+                                     'tempest/tempest.conf')
+    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
+    TEMP_DIR = os.path.join(RALLY_DIR, "var")
+
+    CINDER_VOLUME_TYPE_NAME = "volume_test"
+    RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
+    RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
+    RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
+    RALLY_ROUTER_NAME = CONST.rally_router_name
+
+    def __init__(self):
+        super(RallyBase, self).__init__()
+        self.mode = ''
+        self.summary = []
+        self.scenario_dir = ''
+        self.nova_client = os_utils.get_nova_client()
+        self.neutron_client = os_utils.get_neutron_client()
+        self.cinder_client = os_utils.get_cinder_client()
+        self.network_dict = {}
+        self.volume_type = None
+
+    def _build_task_args(self, test_file_name):
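+        # Assemble the --task-args dictionary passed to "rally task start":
+        # image and flavor, scenario template/support directories, tenant,
+        # user, iteration and concurrency settings, plus network identifiers.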
+        task_args = {'service_list': [test_file_name]}
+        task_args['image_name'] = self.GLANCE_IMAGE_NAME
+        task_args['flavor_name'] = self.FLAVOR_NAME
+        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
+        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
+        task_args['tmpl_dir'] = self.TEMPLATE_DIR
+        task_args['sup_dir'] = self.SUPPORT_DIR
+        task_args['users_amount'] = self.USERS_AMOUNT
+        task_args['tenants_amount'] = self.TENANTS_AMOUNT
+        task_args['use_existing_users'] = False
+        task_args['iterations'] = self.ITERATIONS_AMOUNT
+        task_args['concurrency'] = self.CONCURRENCY
+        task_args['smoke'] = self.smoke
+
+        ext_net = os_utils.get_external_net(self.neutron_client)
+        if ext_net:
+            task_args['floating_network'] = str(ext_net)
+        else:
+            task_args['floating_network'] = ''
+
+        net_id = self.network_dict['net_id']
+        if net_id:
+            task_args['netid'] = str(net_id)
+        else:
+            task_args['netid'] = ''
+
+        auth_url = CONST.OS_AUTH_URL
+        if auth_url is not None:
+            task_args['request_url'] = auth_url.rsplit(":", 1)[0]
+        else:
+            task_args['request_url'] = ''
+
+        return task_args
+
+    def _prepare_test_list(self, test_name):
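+        # Look for the scenario file in the common scenario directory first,
+        # then fall back to the mode-specific directory (sanity or full), and
+        # write a blacklist-filtered copy of it into the temporary directory.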
+        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
+        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
+                                          test_yaml_file_name)
+
+        if not os.path.exists(scenario_file_name):
+            scenario_file_name = os.path.join(self.scenario_dir,
+                                              test_yaml_file_name)
+
+            if not os.path.exists(scenario_file_name):
+                raise Exception("The scenario '%s' does not exist."
+                                % scenario_file_name)
+
+        logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
+        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
+
+        if not os.path.exists(self.TEMP_DIR):
+            os.makedirs(self.TEMP_DIR)
+
+        self.apply_blacklist(scenario_file_name, test_file_name)
+        return test_file_name
+
+    @staticmethod
+    def get_task_id(cmd_raw):
+        """
+        Get the task id from the raw output of a rally command.
+        :param cmd_raw: raw output of the rally command
+        :return: task id as a string, or None if not found
+        """
+        taskid_re = re.compile('^Task +(.*): started$')
+        for line in cmd_raw.splitlines(True):
+            line = line.strip()
+            match = taskid_re.match(line)
+            if match:
+                return match.group(1)
+        return None
+
+    @staticmethod
+    def task_succeed(json_raw):
+        """
+        Parse the rally JSON results and check that every scenario ran
+        without errors.
+        :param json_raw: rally results as a JSON string
+        :return: True if all scenarios succeeded, False otherwise
+        """
+        rally_report = json.loads(json_raw)
+        for report in rally_report:
+            if report is None or report.get('result') is None:
+                return False
+
+            for result in report.get('result'):
+                if result is None or len(result.get('error')) > 0:
+                    return False
+
+        return True
+
+    @staticmethod
+    def live_migration_supported():
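+        # Read the Tempest configuration under the results directory and
+        # report whether live migration is enabled, defaulting to False when
+        # the file, section or option is missing.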
+        config = iniparse.ConfigParser()
+        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
+                config.has_section('compute-feature-enabled') and
+                config.has_option('compute-feature-enabled',
+                                  'live_migration')):
+            return config.getboolean('compute-feature-enabled',
+                                     'live_migration')
+
+        return False
+
+    @staticmethod
+    def get_cmd_output(proc):
+        result = ""
+        while proc.poll() is None:
+            line = proc.stdout.readline()
+            result += line
+        return result
+
+    @staticmethod
+    def excl_scenario():
+        black_tests = []
+        try:
+            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+                black_list_yaml = yaml.safe_load(black_list_file)
+
+            installer_type = CONST.INSTALLER_TYPE
+            deploy_scenario = CONST.DEPLOY_SCENARIO
+            if installer_type and deploy_scenario:
+                if 'scenario' in black_list_yaml.keys():
+                    for item in black_list_yaml['scenario']:
+                        scenarios = item['scenarios']
+                        installers = item['installers']
+                        if (deploy_scenario in scenarios and
+                                installer_type in installers):
+                            tests = item['tests']
+                            black_tests.extend(tests)
+        except Exception:
+            logger.debug("Scenario exclusion not applied.")
+
+        return black_tests
+
+    @staticmethod
+    def excl_func():
+        black_tests = []
+        func_list = []
+
+        try:
+            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
+                black_list_yaml = yaml.safe_load(black_list_file)
+
+            if not RallyBase.live_migration_supported():
+                func_list.append("no_live_migration")
+
+            if 'functionality' in black_list_yaml.keys():
+                for item in black_list_yaml['functionality']:
+                    functions = item['functions']
+                    for func in func_list:
+                        if func in functions:
+                            tests = item['tests']
+                            black_tests.extend(tests)
+        except Exception:
+            logger.debug("Functionality exclusion not applied.")
+
+        return black_tests
+
+    @staticmethod
+    def apply_blacklist(case_file_name, result_file_name):
+        logger.debug("Applying blacklist...")
+        cases_file = open(case_file_name, 'r')
+        result_file = open(result_file_name, 'w')
+
+        black_tests = list(set(RallyBase.excl_func() +
+                           RallyBase.excl_scenario()))
+
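+        # Copy the scenario file line by line, skipping every test block
+        # whose name matches a blacklist pattern: the for/else only writes
+        # a line when no pattern matched, and a blank line ends a skipped
+        # block.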
+        include = True
+        for cases_line in cases_file:
+            if include:
+                for black_tests_line in black_tests:
+                    if re.search(black_tests_line,
+                                 cases_line.strip().rstrip(':')):
+                        include = False
+                        break
+                else:
+                    result_file.write(str(cases_line))
+            else:
+                if cases_line.isspace():
+                    include = True
+
+        cases_file.close()
+        result_file.close()
+
+    @staticmethod
+    def file_is_empty(file_name):
+        try:
+            if os.stat(file_name).st_size > 0:
+                return False
+        except OSError:
+            pass
+
+        return True
+
+    def _run_task(self, test_name):
+        logger.info('Starting test scenario "{}" ...'.format(test_name))
+
+        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
+        if not os.path.exists(task_file):
+            logger.error("Task file '%s' does not exist." % task_file)
+            raise Exception("Task file '%s' does not exist." % task_file)
+
+        file_name = self._prepare_test_list(test_name)
+        if self.file_is_empty(file_name):
+            logger.info('No tests for scenario "{}"'.format(test_name))
+            return
+
+        cmd_line = ("rally task start --abort-on-sla-failure "
+                    "--task {0} "
+                    "--task-args \"{1}\""
+                    .format(task_file, self._build_task_args(test_name)))
+        logger.debug('running command line: {}'.format(cmd_line))
+
+        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT, shell=True)
+        output = self._get_output(p, test_name)
+        task_id = self.get_task_id(output)
+        logger.debug('task_id : {}'.format(task_id))
+
+        if task_id is None:
+            logger.error('Failed to retrieve task_id, validating task...')
+            cmd_line = ("rally task validate "
+                        "--task {0} "
+                        "--task-args \"{1}\""
+                        .format(task_file, self._build_task_args(test_name)))
+            logger.debug('running command line: {}'.format(cmd_line))
+            p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
+                                 stderr=subprocess.STDOUT, shell=True)
+            output = self.get_cmd_output(p)
+            logger.error("Task validation result:" + "\n" + output)
+            return
+
+        # check for result directory and create it otherwise
+        if not os.path.exists(self.RESULTS_DIR):
+            logger.debug('{} does not exist, creating it.'
+                         .format(self.RESULTS_DIR))
+            os.makedirs(self.RESULTS_DIR)
+
+        # write html report file
+        report_html_name = 'opnfv-{}.html'.format(test_name)
+        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+        cmd_line = "rally task report {} --out {}".format(task_id,
+                                                          report_html_dir)
+
+        logger.debug('running command line: {}'.format(cmd_line))
+        os.popen(cmd_line)
+
+        # get and save rally operation JSON result
+        cmd_line = "rally task results %s" % task_id
+        logger.debug('running command line: {}'.format(cmd_line))
+        cmd = os.popen(cmd_line)
+        json_results = cmd.read()
+        report_json_name = 'opnfv-{}.json'.format(test_name)
+        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
+        with open(report_json_dir, 'w') as f:
+            logger.debug('saving json file')
+            f.write(json_results)
+
+        """ parse JSON operation result """
+        if self.task_succeed(json_results):
+            logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+        else:
+            logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+
+    def _get_output(self, proc, test_name):
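+        # Stream the Rally CLI output: keep the progress and table lines for
+        # logging, count the scenario rows, and accumulate the success
+        # percentage and full duration into a per-test summary entry.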
+        result = ""
+        nb_tests = 0
+        overall_duration = 0.0
+        success = 0.0
+        nb_totals = 0
+
+        while proc.poll() is None:
+            line = proc.stdout.readline()
+            if ("Load duration" in line or
+                    "started" in line or
+                    "finished" in line or
+                    " Preparing" in line or
+                    "+-" in line or
+                    "|" in line):
+                result += line
+            elif "test scenario" in line:
+                result += "\n" + line
+            elif "Full duration" in line:
+                result += line + "\n\n"
+
+            # parse output for summary report
+            if ("| " in line and
+                    "| action" not in line and
+                    "| Starting" not in line and
+                    "| Completed" not in line and
+                    "| ITER" not in line and
+                    "|   " not in line and
+                    "| total" not in line):
+                nb_tests += 1
+            elif "| total" in line:
+                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
+                try:
+                    success += float(percentage)
+                except ValueError:
+                    logger.info('Percentage error: %s, %s' %
+                                (percentage, line))
+                nb_totals += 1
+            elif "Full duration" in line:
+                duration = line.split(': ')[1]
+                try:
+                    overall_duration += float(duration)
+                except ValueError:
+                    logger.info('Duration error: %s, %s' % (duration, line))
+
+        overall_duration = "{:10.2f}".format(overall_duration)
+        if nb_totals == 0:
+            success_avg = 0
+        else:
+            success_avg = "{:0.2f}".format(success / nb_totals)
+
+        scenario_summary = {'test_name': test_name,
+                            'overall_duration': overall_duration,
+                            'nb_tests': nb_tests,
+                            'success': success_avg}
+        self.summary.append(scenario_summary)
+
+        logger.debug("\n" + result)
+
+        return result
+
+    def _prepare_env(self):
+        logger.debug('Validating the test name...')
+        if self.test_name not in self.TESTS:
+            raise Exception("Test name '%s' is invalid" % self.test_name)
+
+        volume_types = os_utils.list_volume_types(self.cinder_client,
+                                                  private=False)
+        if volume_types:
+            logger.debug("Using existing volume type(s)...")
+        else:
+            logger.debug('Creating volume type...')
+            self.volume_type = os_utils.create_volume_type(
+                self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
+            if self.volume_type is None:
+                raise Exception("Failed to create volume type '%s'" %
+                                self.CINDER_VOLUME_TYPE_NAME)
+            logger.debug("Volume type '%s' is created succesfully." %
+                         self.CINDER_VOLUME_TYPE_NAME)
+
+        logger.debug('Getting or creating image...')
+        self.image_exists, self.image_id = os_utils.get_or_create_image(
+            self.GLANCE_IMAGE_NAME,
+            self.GLANCE_IMAGE_PATH,
+            self.GLANCE_IMAGE_FORMAT)
+        if self.image_id is None:
+            raise Exception("Failed to get or create image '%s'" %
+                            self.GLANCE_IMAGE_NAME)
+
+        logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
+        self.network_dict = os_utils.create_shared_network_full(
+            self.RALLY_PRIVATE_NET_NAME,
+            self.RALLY_PRIVATE_SUBNET_NAME,
+            self.RALLY_ROUTER_NAME,
+            self.RALLY_PRIVATE_SUBNET_CIDR)
+        if self.network_dict is None:
+            raise Exception("Failed to create shared network '%s'" %
+                            self.RALLY_PRIVATE_NET_NAME)
+
+    def _run_tests(self):
+        if self.test_name == 'all':
+            for test in self.TESTS:
+                if test in ('all', 'vm'):
+                    continue
+                self._run_task(test)
+        else:
+            self._run_task(self.test_name)
+
+    def _generate_report(self):
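+        # Build the ASCII summary table from the per-scenario results, store
+        # the payload as the test case details and derive the PASS/FAIL
+        # criteria from the overall success rate.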
+        report = (
+            "\n"
+            "                                                              "
+            "\n"
+            "                     Rally Summary Report\n"
+            "\n"
+            "+===================+============+===============+===========+"
+            "\n"
+            "| Module            | Duration   | nb. Test Run  | Success   |"
+            "\n"
+            "+===================+============+===============+===========+"
+            "\n")
+        payload = []
+
+        # for each scenario we draw a row for the table
+        total_duration = 0.0
+        total_nb_tests = 0
+        total_success = 0.0
+        for s in self.summary:
+            name = "{0:<17}".format(s['test_name'])
+            duration = float(s['overall_duration'])
+            total_duration += duration
+            duration = time.strftime("%M:%S", time.gmtime(duration))
+            duration = "{0:<10}".format(duration)
+            nb_tests = "{0:<13}".format(s['nb_tests'])
+            total_nb_tests += int(s['nb_tests'])
+            success = "{0:<10}".format(str(s['success']) + '%')
+            total_success += float(s['success'])
+            report += ("" +
+                       "| " + name + " | " + duration + " | " +
+                       nb_tests + " | " + success + "|\n" +
+                       "+-------------------+------------"
+                       "+---------------+-----------+\n")
+            payload.append({'module': name,
+                            'details': {'duration': s['overall_duration'],
+                                        'nb tests': s['nb_tests'],
+                                        'success': s['success']}})
+
+        total_duration_str = time.strftime("%H:%M:%S",
+                                           time.gmtime(total_duration))
+        total_duration_str2 = "{0:<10}".format(total_duration_str)
+        total_nb_tests_str = "{0:<13}".format(total_nb_tests)
+
+        if len(self.summary):
+            success_rate = total_success / len(self.summary)
+        else:
+            success_rate = 100
+        success_rate = "{:0.2f}".format(success_rate)
+        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
+        report += ("+===================+============"
+                   "+===============+===========+")
+        report += "\n"
+        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
+                   total_nb_tests_str + " | " + success_rate_str + "|\n")
+        report += ("+===================+============"
+                   "+===============+===========+")
+        report += "\n"
+
+        logger.info("\n" + report)
+        payload.append({'summary': {'duration': total_duration,
+                                    'nb tests': total_nb_tests,
+                                    'nb success': success_rate}})
+
+        self.criteria = ft_utils.check_success_rate(
+            self.case_name, success_rate)
+        self.details = payload
+
+        logger.info("Rally '%s' success_rate is %s%%, is marked as %s"
+                    % (self.case_name, success_rate, self.criteria))
+
+    def _clean_up(self):
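+        # Delete only the resources created by this run: the volume type if
+        # one was created and the image if it did not exist beforehand.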
+        if self.volume_type:
+            logger.debug("Deleting volume type '%s'..." % self.volume_type)
+            os_utils.delete_volume_type(self.cinder_client, self.volume_type)
+
+        if not self.image_exists:
+            logger.debug("Deleting image '%s' with ID '%s'..."
+                         % (self.GLANCE_IMAGE_NAME, self.image_id))
+            if not os_utils.delete_glance_image(self.nova_client,
+                                                self.image_id):
+                logger.error("Error deleting the glance image")
+
+    def run(self):
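+        # Test case entry point: prepare the environment, run the Rally
+        # tasks, generate the summary report and clean up. The exit status
+        # maps the computed criteria to the TestcaseBase return codes.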
+        self.start_time = time.time()
+        try:
+            self._prepare_env()
+            self._run_tests()
+            self._generate_report()
+            self._clean_up()
+        except Exception as e:
+            logger.error('Error with run: %s' % e)
+            return testcase_base.TestcaseBase.EX_RUN_ERROR
+        self.stop_time = time.time()
+
+        if self.criteria == "PASS":
+            return testcase_base.TestcaseBase.EX_OK
+        else:
+            return testcase_base.TestcaseBase.EX_TESTCASE_FAILED
+
+
+class RallySanity(RallyBase):
+    def __init__(self):
+        super(RallySanity, self).__init__()
+        self.case_name = 'rally_sanity'
+        self.mode = 'sanity'
+        self.test_name = 'all'
+        self.smoke = True
+        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
+
+
+class RallyFull(RallyBase):
+    def __init__(self):
+        super(RallyFull, self).__init__()
+        self.case_name = 'rally_full'
+        self.mode = 'full'
+        self.test_name = 'all'
+        self.smoke = False
+        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
diff --git a/functest/opnfv_tests/openstack/rally/run_rally-cert.py b/functest/opnfv_tests/openstack/rally/run_rally-cert.py
deleted file mode 100755 (executable)
index b02fd42..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2015 All rights reserved
-# This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-
-import argparse
-import json
-import os
-import re
-import subprocess
-import time
-
-import iniparse
-import yaml
-
-from functest.utils.constants import CONST
-import functest.utils.functest_logger as ft_logger
-import functest.utils.functest_utils as ft_utils
-import functest.utils.openstack_utils as os_utils
-
-tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
-         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
-parser = argparse.ArgumentParser()
-parser.add_argument("test_name",
-                    help="Module name to be tested. "
-                         "Possible values are : "
-                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
-                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
-                         "{d[10]} ] "
-                         "The 'all' value "
-                         "performs all possible test scenarios"
-                         .format(d=tests))
-
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
-parser.add_argument("-r", "--report",
-                    help="Create json result file",
-                    action="store_true")
-parser.add_argument("-s", "--smoke",
-                    help="Smoke test mode",
-                    action="store_true")
-parser.add_argument("-v", "--verbose",
-                    help="Print verbose info about the progress",
-                    action="store_true")
-parser.add_argument("-n", "--noclean",
-                    help="Don't clean the created resources for this test.",
-                    action="store_true")
-parser.add_argument("-z", "--sanity",
-                    help="Sanity test mode, execute only a subset of tests",
-                    action="store_true")
-
-args = parser.parse_args()
-
-
-if args.verbose:
-    RALLY_STDERR = subprocess.STDOUT
-else:
-    RALLY_STDERR = open(os.devnull, 'w')
-
-""" logging configuration """
-logger = ft_logger.Logger("run_rally-cert").getLogger()
-
-RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
-RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
-SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
-FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
-TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
-SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
-TEMP_DIR = os.path.join(RALLY_DIR, "var")
-BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
-
-FLAVOR_NAME = "m1.tiny"
-USERS_AMOUNT = 2
-TENANTS_AMOUNT = 3
-ITERATIONS_AMOUNT = 10
-CONCURRENCY = 4
-
-RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
-TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
-                                 'tempest/tempest.conf')
-
-RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
-RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
-RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
-RALLY_ROUTER_NAME = CONST.rally_router_name
-
-GLANCE_IMAGE_NAME = CONST.openstack_image_name
-GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
-GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
-GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
-                                 GLANCE_IMAGE_FILENAME)
-CINDER_VOLUME_TYPE_NAME = "volume_test"
-
-
-class GlobalVariables:
-    SUMMARY = []
-    neutron_client = None
-    network_dict = {}
-
-
-def get_task_id(cmd_raw):
-    """
-    get task id from command rally result
-    :param cmd_raw:
-    :return: task_id as string
-    """
-    taskid_re = re.compile('^Task +(.*): started$')
-    for line in cmd_raw.splitlines(True):
-        line = line.strip()
-        match = taskid_re.match(line)
-        if match:
-            return match.group(1)
-    return None
-
-
-def task_succeed(json_raw):
-    """
-    Parse JSON from rally JSON results
-    :param json_raw:
-    :return: Bool
-    """
-    rally_report = json.loads(json_raw)
-    for report in rally_report:
-        if report is None or report.get('result') is None:
-            return False
-
-        for result in report.get('result'):
-            if result is None or len(result.get('error')) > 0:
-                return False
-
-    return True
-
-
-def live_migration_supported():
-    config = iniparse.ConfigParser()
-    if (config.read(TEMPEST_CONF_FILE) and
-            config.has_section('compute-feature-enabled') and
-            config.has_option('compute-feature-enabled', 'live_migration')):
-        return config.getboolean('compute-feature-enabled', 'live_migration')
-
-    return False
-
-
-def build_task_args(test_file_name):
-    task_args = {'service_list': [test_file_name]}
-    task_args['image_name'] = GLANCE_IMAGE_NAME
-    task_args['flavor_name'] = FLAVOR_NAME
-    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
-    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
-    task_args['tmpl_dir'] = TEMPLATE_DIR
-    task_args['sup_dir'] = SUPPORT_DIR
-    task_args['users_amount'] = USERS_AMOUNT
-    task_args['tenants_amount'] = TENANTS_AMOUNT
-    task_args['use_existing_users'] = False
-    task_args['iterations'] = ITERATIONS_AMOUNT
-    task_args['concurrency'] = CONCURRENCY
-
-    if args.sanity:
-        task_args['smoke'] = True
-    else:
-        task_args['smoke'] = args.smoke
-
-    ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
-    if ext_net:
-        task_args['floating_network'] = str(ext_net)
-    else:
-        task_args['floating_network'] = ''
-
-    net_id = GlobalVariables.network_dict['net_id']
-    task_args['netid'] = str(net_id)
-
-    auth_url = CONST.OS_AUTH_URL
-    if auth_url is not None:
-        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
-    else:
-        task_args['request_url'] = ''
-
-    return task_args
-
-
-def get_output(proc, test_name):
-    result = ""
-    nb_tests = 0
-    overall_duration = 0.0
-    success = 0.0
-    nb_totals = 0
-
-    while proc.poll() is None:
-        line = proc.stdout.readline()
-        if args.verbose:
-            result += line
-        else:
-            if ("Load duration" in line or
-                    "started" in line or
-                    "finished" in line or
-                    " Preparing" in line or
-                    "+-" in line or
-                    "|" in line):
-                result += line
-            elif "test scenario" in line:
-                result += "\n" + line
-            elif "Full duration" in line:
-                result += line + "\n\n"
-
-        # parse output for summary report
-        if ("| " in line and
-                "| action" not in line and
-                "| Starting" not in line and
-                "| Completed" not in line and
-                "| ITER" not in line and
-                "|   " not in line and
-                "| total" not in line):
-            nb_tests += 1
-        elif "| total" in line:
-            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
-            try:
-                success += float(percentage)
-            except ValueError:
-                logger.info('Percentage error: %s, %s' % (percentage, line))
-            nb_totals += 1
-        elif "Full duration" in line:
-            duration = line.split(': ')[1]
-            try:
-                overall_duration += float(duration)
-            except ValueError:
-                logger.info('Duration error: %s, %s' % (duration, line))
-
-    overall_duration = "{:10.2f}".format(overall_duration)
-    if nb_totals == 0:
-        success_avg = 0
-    else:
-        success_avg = "{:0.2f}".format(success / nb_totals)
-
-    scenario_summary = {'test_name': test_name,
-                        'overall_duration': overall_duration,
-                        'nb_tests': nb_tests,
-                        'success': success_avg}
-    GlobalVariables.SUMMARY.append(scenario_summary)
-
-    logger.debug("\n" + result)
-
-    return result
-
-
-def get_cmd_output(proc):
-    result = ""
-
-    while proc.poll() is None:
-        line = proc.stdout.readline()
-        result += line
-
-    return result
-
-
-def excl_scenario():
-    black_tests = []
-
-    try:
-        with open(BLACKLIST_FILE, 'r') as black_list_file:
-            black_list_yaml = yaml.safe_load(black_list_file)
-
-        installer_type = CONST.INSTALLER_TYPE
-        deploy_scenario = CONST.DEPLOY_SCENARIO
-        if (bool(installer_type) * bool(deploy_scenario)):
-            if 'scenario' in black_list_yaml.keys():
-                for item in black_list_yaml['scenario']:
-                    scenarios = item['scenarios']
-                    installers = item['installers']
-                    if (deploy_scenario in scenarios and
-                            installer_type in installers):
-                        tests = item['tests']
-                        black_tests.extend(tests)
-    except:
-        logger.debug("Scenario exclusion not applied.")
-
-    return black_tests
-
-
-def excl_func():
-    black_tests = []
-    func_list = []
-
-    try:
-        with open(BLACKLIST_FILE, 'r') as black_list_file:
-            black_list_yaml = yaml.safe_load(black_list_file)
-
-        if not live_migration_supported():
-            func_list.append("no_live_migration")
-
-        if 'functionality' in black_list_yaml.keys():
-            for item in black_list_yaml['functionality']:
-                functions = item['functions']
-                for func in func_list:
-                    if func in functions:
-                        tests = item['tests']
-                        black_tests.extend(tests)
-    except:
-        logger.debug("Functionality exclusion not applied.")
-
-    return black_tests
-
-
-def apply_blacklist(case_file_name, result_file_name):
-    logger.debug("Applying blacklist...")
-    cases_file = open(case_file_name, 'r')
-    result_file = open(result_file_name, 'w')
-
-    black_tests = list(set(excl_func() + excl_scenario()))
-
-    include = True
-    for cases_line in cases_file:
-        if include:
-            for black_tests_line in black_tests:
-                if re.search(black_tests_line, cases_line.strip().rstrip(':')):
-                    include = False
-                    break
-            else:
-                result_file.write(str(cases_line))
-        else:
-            if cases_line.isspace():
-                include = True
-
-    cases_file.close()
-    result_file.close()
-
-
-def prepare_test_list(test_name):
-    test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
-    scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)
-
-    if not os.path.exists(scenario_file_name):
-        if args.sanity:
-            scenario_file_name = os.path.join(SANITY_MODE_DIR,
-                                              test_yaml_file_name)
-        else:
-            scenario_file_name = os.path.join(FULL_MODE_DIR,
-                                              test_yaml_file_name)
-
-        if not os.path.exists(scenario_file_name):
-            logger.info("The scenario '%s' does not exist."
-                        % scenario_file_name)
-            exit(-1)
-
-    logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
-    test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)
-
-    if not os.path.exists(TEMP_DIR):
-        os.makedirs(TEMP_DIR)
-
-    apply_blacklist(scenario_file_name, test_file_name)
-    return test_file_name
-
-
-def file_is_empty(file_name):
-    try:
-        if os.stat(file_name).st_size > 0:
-            return False
-    except:
-        pass
-
-    return True
-
-
-def run_task(test_name):
-    #
-    # the "main" function of the script who launch rally for a task
-    # :param test_name: name for the rally test
-    # :return: void
-    #
-    logger.info('Starting test scenario "{}" ...'.format(test_name))
-    start_time = time.time()
-
-    task_file = os.path.join(RALLY_DIR, 'task.yaml')
-    if not os.path.exists(task_file):
-        logger.error("Task file '%s' does not exist." % task_file)
-        exit(-1)
-
-    file_name = prepare_test_list(test_name)
-    if file_is_empty(file_name):
-        logger.info('No tests for scenario "{}"'.format(test_name))
-        return
-
-    cmd_line = ("rally task start --abort-on-sla-failure "
-                "--task {0} "
-                "--task-args \"{1}\""
-                .format(task_file, build_task_args(test_name)))
-    logger.debug('running command line: {}'.format(cmd_line))
-
-    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
-                         stderr=RALLY_STDERR, shell=True)
-    output = get_output(p, test_name)
-    task_id = get_task_id(output)
-    logger.debug('task_id : {}'.format(task_id))
-
-    if task_id is None:
-        logger.error('Failed to retrieve task_id, validating task...')
-        cmd_line = ("rally task validate "
-                    "--task {0} "
-                    "--task-args \"{1}\""
-                    .format(task_file, build_task_args(test_name)))
-        logger.debug('running command line: {}'.format(cmd_line))
-        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT, shell=True)
-        output = get_cmd_output(p)
-        logger.error("Task validation result:" + "\n" + output)
-        return
-
-    # check for result directory and create it otherwise
-    if not os.path.exists(RESULTS_DIR):
-        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
-        os.makedirs(RESULTS_DIR)
-
-    # write html report file
-    report_html_name = 'opnfv-{}.html'.format(test_name)
-    report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
-    cmd_line = "rally task report {} --out {}".format(task_id,
-                                                      report_html_dir)
-
-    logger.debug('running command line: {}'.format(cmd_line))
-    os.popen(cmd_line)
-
-    # get and save rally operation JSON result
-    cmd_line = "rally task results %s" % task_id
-    logger.debug('running command line: {}'.format(cmd_line))
-    cmd = os.popen(cmd_line)
-    json_results = cmd.read()
-    report_json_name = 'opnfv-{}.json'.format(test_name)
-    report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
-    with open(report_json_dir, 'w') as f:
-        logger.debug('saving json file')
-        f.write(json_results)
-
-    with open(report_json_dir) as json_file:
-        json_data = json.load(json_file)
-
-    """ parse JSON operation result """
-    status = "FAIL"
-    if task_succeed(json_results):
-        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
-        status = "PASS"
-    else:
-        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
-
-    # Push results in payload of testcase
-    if args.report:
-        stop_time = time.time()
-        logger.debug("Push Rally detailed results into DB")
-        ft_utils.push_results_to_db("functest",
-                                    "Rally_details",
-                                    start_time,
-                                    stop_time,
-                                    status,
-                                    json_data)
-
-
-def main():
-
-    GlobalVariables.nova_client = os_utils.get_nova_client()
-    GlobalVariables.neutron_client = os_utils.get_neutron_client()
-    cinder_client = os_utils.get_cinder_client()
-
-    start_time = time.time()
-
-    # configure script
-    if not (args.test_name in tests):
-        logger.error('argument not valid')
-        exit(-1)
-
-    GlobalVariables.SUMMARY = []
-
-    volume_types = os_utils.list_volume_types(cinder_client,
-                                              private=False)
-    if not volume_types:
-        volume_type = os_utils.create_volume_type(
-            cinder_client, CINDER_VOLUME_TYPE_NAME)
-        if not volume_type:
-            logger.error("Failed to create volume type...")
-            exit(-1)
-        else:
-            logger.debug("Volume type '%s' created succesfully..."
-                         % CINDER_VOLUME_TYPE_NAME)
-    else:
-        logger.debug("Using existing volume type(s)...")
-
-    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
-                                                          GLANCE_IMAGE_PATH,
-                                                          GLANCE_IMAGE_FORMAT)
-    if not image_id:
-        exit(-1)
-
-    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
-    GlobalVariables.network_dict = \
-        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
-                                            RALLY_PRIVATE_SUBNET_NAME,
-                                            RALLY_ROUTER_NAME,
-                                            RALLY_PRIVATE_SUBNET_CIDR)
-    if not GlobalVariables.network_dict:
-        exit(1)
-
-    if args.test_name == "all":
-        for test_name in tests:
-            if not (test_name == 'all' or
-                    test_name == 'vm'):
-                run_task(test_name)
-    else:
-        logger.debug("Test name: " + args.test_name)
-        run_task(args.test_name)
-
-    report = ("\n"
-              "                                                              "
-              "\n"
-              "                     Rally Summary Report\n"
-              "\n"
-              "+===================+============+===============+===========+"
-              "\n"
-              "| Module            | Duration   | nb. Test Run  | Success   |"
-              "\n"
-              "+===================+============+===============+===========+"
-              "\n")
-    payload = []
-    stop_time = time.time()
-
-    # for each scenario we draw a row for the table
-    total_duration = 0.0
-    total_nb_tests = 0
-    total_success = 0.0
-    for s in GlobalVariables.SUMMARY:
-        name = "{0:<17}".format(s['test_name'])
-        duration = float(s['overall_duration'])
-        total_duration += duration
-        duration = time.strftime("%M:%S", time.gmtime(duration))
-        duration = "{0:<10}".format(duration)
-        nb_tests = "{0:<13}".format(s['nb_tests'])
-        total_nb_tests += int(s['nb_tests'])
-        success = "{0:<10}".format(str(s['success']) + '%')
-        total_success += float(s['success'])
-        report += ("" +
-                   "| " + name + " | " + duration + " | " +
-                   nb_tests + " | " + success + "|\n" +
-                   "+-------------------+------------"
-                   "+---------------+-----------+\n")
-        payload.append({'module': name,
-                        'details': {'duration': s['overall_duration'],
-                                    'nb tests': s['nb_tests'],
-                                    'success': s['success']}})
-
-    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
-    total_duration_str2 = "{0:<10}".format(total_duration_str)
-    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
-    if len(GlobalVariables.SUMMARY):
-        success_rate = total_success / len(GlobalVariables.SUMMARY)
-    else:
-        success_rate = 100
-    success_rate = "{:0.2f}".format(success_rate)
-    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
-    report += "+===================+============+===============+===========+"
-    report += "\n"
-    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
-               total_nb_tests_str + " | " + success_rate_str + "|\n")
-    report += "+===================+============+===============+===========+"
-    report += "\n"
-
-    logger.info("\n" + report)
-    payload.append({'summary': {'duration': total_duration,
-                                'nb tests': total_nb_tests,
-                                'nb success': success_rate}})
-
-    if args.sanity:
-        case_name = "rally_sanity"
-    else:
-        case_name = "rally_full"
-
-    # Evaluation of the success criteria
-    status = ft_utils.check_success_rate(case_name, success_rate)
-
-    exit_code = -1
-    if status == "PASS":
-        exit_code = 0
-
-    if args.report:
-        logger.debug("Pushing Rally summary into DB...")
-        ft_utils.push_results_to_db("functest",
-                                    case_name,
-                                    start_time,
-                                    stop_time,
-                                    status,
-                                    payload)
-    if args.noclean:
-        exit(exit_code)
-
-    if not image_exists:
-        logger.debug("Deleting image '%s' with ID '%s'..."
-                     % (GLANCE_IMAGE_NAME, image_id))
-        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
-                                            image_id):
-            logger.error("Error deleting the glance image")
-
-    if not volume_types:
-        logger.debug("Deleting volume type '%s'..."
-                     % CINDER_VOLUME_TYPE_NAME)
-        if not os_utils.delete_volume_type(cinder_client, volume_type):
-            logger.error("Error in deleting volume type...")
-
-    exit(exit_code)
-
-
-if __name__ == '__main__':
-    main()