Merge "Publish rally logs to ease debugging."
[functest.git] / functest/opnfv_tests/openstack/rally/rally.py
index a808d03..4a63629 100644
@@ -18,50 +18,27 @@ import os
 import re
 import subprocess
 import time
-import uuid
 
 import pkg_resources
+import prettytable
+from xtesting.core import testcase
+from xtesting.energy import energy
 import yaml
 
-from functest.core import testcase
-from functest.energy import energy
-from functest.opnfv_tests.openstack.snaps import snaps_utils
+from functest.core import singlevm
 from functest.opnfv_tests.openstack.tempest import conf_utils
-from functest.utils.constants import CONST
-
-from snaps.config.flavor import FlavorConfig
-from snaps.config.image import ImageConfig
-from snaps.config.network import NetworkConfig, SubnetConfig
-from snaps.config.router import RouterConfig
-
-from snaps.openstack.create_flavor import OpenStackFlavor
-from snaps.openstack.tests import openstack_tests
-from snaps.openstack.utils import deploy_utils
+from functest.utils import config
+from functest.utils import env
 
 LOGGER = logging.getLogger(__name__)
 
 
-class RallyBase(testcase.TestCase):
+class RallyBase(singlevm.VmReady1):
     """Base class form Rally testcases implementation."""
 
-    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+    # pylint: disable=too-many-instance-attributes
+    TESTS = ['authenticate', 'glance', 'cinder', 'gnocchi', 'heat',
              'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
-    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
-    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
-    GLANCE_IMAGE_PATH = os.path.join(
-        CONST.__getattribute__('dir_functest_images'),
-        GLANCE_IMAGE_FILENAME)
-    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
-    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
-    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
-    if hasattr(CONST, 'openstack_extra_properties'):
-        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
-            'openstack_extra_properties')
-    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
-    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
-    FLAVOR_EXTRA_SPECS = None
-    if hasattr(CONST, 'flavor_extra_specs'):
-        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')
 
     RALLY_DIR = pkg_resources.resource_filename(
         'functest', 'opnfv_tests/openstack/rally')
@@ -75,57 +52,38 @@ class RallyBase(testcase.TestCase):
     TENANTS_AMOUNT = 3
     ITERATIONS_AMOUNT = 10
     CONCURRENCY = 4
-    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
+    RESULTS_DIR = os.path.join(getattr(config.CONF, 'dir_results'), 'rally')
     BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
     TEMP_DIR = os.path.join(RALLY_DIR, "var")
 
-    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
-    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
-    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
-    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
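+    # both attributes are consumed by the singlevm base class: the image is
+    # published and the network shared so Rally's per-tenant users can use them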
+    visibility = 'public'
+    shared_network = True
 
     def __init__(self, **kwargs):
         """Initialize RallyBase object."""
         super(RallyBase, self).__init__(**kwargs)
-        if 'os_creds' in kwargs:
-            self.os_creds = kwargs['os_creds']
-        else:
-            creds_override = None
-            if hasattr(CONST, 'snaps_os_creds_override'):
-                creds_override = CONST.__getattribute__(
-                    'snaps_os_creds_override')
-
-            self.os_creds = openstack_tests.get_credentials(
-                os_env_file=CONST.__getattribute__('openstack_creds'),
-                overrides=creds_override)
-
-        self.guid = '-' + str(uuid.uuid4())
-
         self.creators = []
         self.mode = ''
         self.summary = []
         self.scenario_dir = ''
-        self.image_name = None
-        self.ext_net_name = None
-        self.priv_net_id = None
-        self.flavor_name = None
-        self.flavor_alt_name = None
         self.smoke = None
         self.test_name = None
         self.start_time = None
         self.result = None
         self.details = None
         self.compute_cnt = 0
+        self.flavor_alt = None
 
     def _build_task_args(self, test_file_name):
+        """Build arguments for the Rally task."""
         task_args = {'service_list': [test_file_name]}
-        task_args['image_name'] = self.image_name
-        task_args['flavor_name'] = self.flavor_name
-        task_args['flavor_alt_name'] = self.flavor_alt_name
-        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
-        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
-        task_args['tmpl_dir'] = self.TEMPLATE_DIR
-        task_args['sup_dir'] = self.SUPPORT_DIR
+        task_args['image_name'] = str(self.image.name)
+        task_args['flavor_name'] = str(self.flavor.name)
+        task_args['flavor_alt_name'] = str(self.flavor_alt.name)
+        task_args['glance_image_location'] = str(self.filename)
+        task_args['glance_image_format'] = str(self.image_format)
+        task_args['tmpl_dir'] = str(self.TEMPLATE_DIR)
+        task_args['sup_dir'] = str(self.SUPPORT_DIR)
         task_args['users_amount'] = self.USERS_AMOUNT
         task_args['tenants_amount'] = self.TENANTS_AMOUNT
         task_args['use_existing_users'] = False
@@ -133,21 +91,20 @@ class RallyBase(testcase.TestCase):
         task_args['concurrency'] = self.CONCURRENCY
         task_args['smoke'] = self.smoke
 
-        ext_net = self.ext_net_name
-        if ext_net:
-            task_args['floating_network'] = str(ext_net)
+        if self.ext_net:
+            task_args['floating_network'] = str(self.ext_net.name)
         else:
             task_args['floating_network'] = ''
 
-        net_id = self.priv_net_id
-        if net_id:
-            task_args['netid'] = str(net_id)
+        if self.network:
+            task_args['netid'] = str(self.network.id)
         else:
             task_args['netid'] = ''
 
         return task_args
 
     def _prepare_test_list(self, test_name):
+        """Build the list of test cases to be executed."""
         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                           test_yaml_file_name)
@@ -194,14 +151,14 @@ class RallyBase(testcase.TestCase):
         :return: True if all tasks finished and passed their SLA, else False
         """
         rally_report = json.loads(json_raw)
-        for report in rally_report:
-            if report is None or report.get('result') is None:
-                return False
-
-            for result in report.get('result'):
-                if result is None or len(result.get('error')) > 0:
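+        # the Rally JSON report is expected to nest results as
+        # {"tasks": [{"status": ..., "pass_sla": ..., ...}]}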
+        tasks = rally_report.get('tasks')
+        if tasks:
+            for task in tasks:
+                if task.get('status') != 'finished' or \
+                   task.get('pass_sla') is not True:
                     return False
-
+        else:
+            return False
         return True
 
     def _migration_supported(self):
@@ -211,15 +168,6 @@ class RallyBase(testcase.TestCase):
 
         return False
 
-    @staticmethod
-    def get_cmd_output(proc):
-        """Get command stdout."""
-        result = ""
-        while proc.poll() is None:
-            line = proc.stdout.readline()
-            result += line
-        return result
-
     @staticmethod
     def excl_scenario():
         """Exclude scenario."""
@@ -228,8 +176,8 @@ class RallyBase(testcase.TestCase):
             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                 black_list_yaml = yaml.safe_load(black_list_file)
 
-            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
-            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
+            installer_type = env.get('INSTALLER_TYPE')
+            deploy_scenario = env.get('DEPLOY_SCENARIO')
             if (bool(installer_type) and bool(deploy_scenario) and
                     'scenario' in black_list_yaml.keys()):
                 for item in black_list_yaml['scenario']:
@@ -240,7 +188,7 @@ class RallyBase(testcase.TestCase):
                             in_it(installer_type, installers)):
                         tests = item['tests']
                         black_tests.extend(tests)
-        except Exception:
+        except Exception:  # pylint: disable=broad-except
             LOGGER.debug("Scenario exclusion not applied.")
 
         return black_tests
@@ -263,8 +211,8 @@ class RallyBase(testcase.TestCase):
             # match if regex pattern is set and found in the needle
             if pattern and re.search(pattern, needle) is not None:
                 return True
-        else:
-            return False
+
+        return False
 
     def excl_func(self):
         """Exclude functionalities."""
@@ -300,7 +248,7 @@ class RallyBase(testcase.TestCase):
                                self.excl_scenario()))
 
         if black_tests:
-            LOGGER.debug("Blacklisted tests: " + str(black_tests))
+            LOGGER.debug("Blacklisted tests: %s", str(black_tests))
 
         include = True
         for cases_line in cases_file:
@@ -330,6 +278,47 @@ class RallyBase(testcase.TestCase):
 
         return True
 
+    def _save_results(self, test_name, task_id):
+        """ Generate and save task execution results"""
+        # check for result directory and create it otherwise
+        if not os.path.exists(self.RESULTS_DIR):
+            LOGGER.debug('%s does not exist, creating it.',
+                         self.RESULTS_DIR)
+            os.makedirs(self.RESULTS_DIR)
+
+        # log the detailed results so they end up in the published logs
+        cmd = (["rally", "task", "detailed", "--uuid", task_id])
+        LOGGER.debug('running command: %s', cmd)
+        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        LOGGER.info("%s\n%s", " ".join(cmd), output)
+
+        # save report as JSON
+        report_json_name = 'opnfv-{}.json'.format(test_name)
+        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
+        cmd = (["rally", "task", "report", "--json", "--uuid", task_id,
+                "--out", report_json_dir])
+        LOGGER.debug('running command: %s', cmd)
+        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        LOGGER.info("%s\n%s", " ".join(cmd), output)
+
+        # save report as HTML
+        report_html_name = 'opnfv-{}.html'.format(test_name)
+        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+        cmd = (["rally", "task", "report", "--html", "--uuid", task_id,
+                "--out", report_html_dir])
+        LOGGER.debug('running command: %s', cmd)
+        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        LOGGER.info("%s\n%s", " ".join(cmd), output)
+
+        with open(report_json_dir) as json_file:
+            json_results = json_file.read()
+        self._append_summary(json_results, test_name)
+
+        # parse JSON operation result
+        if self.task_succeed(json_results):
+            LOGGER.info('Test scenario: "%s" OK.', test_name)
+        else:
+            LOGGER.info('Test scenario: "%s" Failed.', test_name)
+
     def _run_task(self, test_name):
         """Run a task."""
         LOGGER.info('Starting test scenario "%s" ...', test_name)
@@ -337,7 +326,7 @@ class RallyBase(testcase.TestCase):
         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
         if not os.path.exists(task_file):
             LOGGER.error("Task file '%s' does not exist.", task_file)
-            raise Exception("Task file '%s' does not exist.", task_file)
+            raise Exception("Task file '{}' does not exist.".format(task_file))
 
         file_name = self._prepare_test_list(test_name)
         if self.file_is_empty(file_name):
@@ -348,188 +337,59 @@ class RallyBase(testcase.TestCase):
                 task_file, "--task-args",
                 str(self._build_task_args(test_name))])
         LOGGER.debug('running command: %s', cmd)
-
         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
-        output = self._get_output(proc, test_name)
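+        # communicate() blocks until the task completes and returns the
+        # combined stdout/stderr, from which the task id is parsed below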
+        output = proc.communicate()[0]
+
         task_id = self.get_task_id(output)
         LOGGER.debug('task_id : %s', task_id)
-
         if task_id is None:
-            LOGGER.error('Failed to retrieve task_id, validating task...')
-            cmd = (["rally", "task", "validate", "--task", task_file,
-                    "--task-args", str(self._build_task_args(test_name))])
-            LOGGER.debug('running command: %s', cmd)
-            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                                    stderr=subprocess.STDOUT)
-            output = self.get_cmd_output(proc)
-            LOGGER.error("Task validation result:" + "\n" + output)
-            return
-
-        # check for result directory and create it otherwise
-        if not os.path.exists(self.RESULTS_DIR):
-            LOGGER.debug('%s does not exist, we create it.',
-                         self.RESULTS_DIR)
-            os.makedirs(self.RESULTS_DIR)
+            LOGGER.error("Failed to retrieve task_id")
+            LOGGER.error("Result:\n%s", output)
+            raise Exception("Failed to retrieve task id")
 
-        # write html report file
-        report_html_name = 'opnfv-{}.html'.format(test_name)
-        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
-        cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
+        self._save_results(test_name, task_id)
 
-        LOGGER.debug('running command: %s', cmd)
-        subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                         stderr=subprocess.STDOUT)
+    def _append_summary(self, json_raw, test_name):
+        """Update statistics summary info."""
+        nb_tests = 0
+        nb_success = 0
+        overall_duration = 0.0
 
-        # get and save rally operation JSON result
-        cmd = (["rally", "task", "results", task_id])
-        LOGGER.debug('running command: %s', cmd)
-        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                                stderr=subprocess.STDOUT)
-        json_results = self.get_cmd_output(proc)
-        report_json_name = 'opnfv-{}.json'.format(test_name)
-        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
-        with open(report_json_dir, 'w') as r_file:
-            LOGGER.debug('saving json file')
-            r_file.write(json_results)
+        rally_report = json.loads(json_raw)
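+        # walk the report hierarchy (tasks -> subtasks -> workloads); each
+        # workload carries its duration and one result entry per iteration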
+        for task in rally_report.get('tasks'):
+            for subtask in task.get('subtasks'):
+                for workload in subtask.get('workloads'):
+                    if workload.get('full_duration'):
+                        overall_duration += workload.get('full_duration')
 
-        # parse JSON operation result
-        if self.task_succeed(json_results):
-            LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
-        else:
-            LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+                    if workload.get('data'):
+                        nb_tests += len(workload.get('data'))
 
-    def _get_output(self, proc, test_name):
-        result = ""
-        nb_tests = 0
-        overall_duration = 0.0
-        success = 0.0
-        nb_totals = 0
-
-        while proc.poll() is None:
-            line = proc.stdout.readline()
-            if ("Load duration" in line or
-                    "started" in line or
-                    "finished" in line or
-                    " Preparing" in line or
-                    "+-" in line or
-                    "|" in line):
-                result += line
-            elif "test scenario" in line:
-                result += "\n" + line
-            elif "Full duration" in line:
-                result += line + "\n\n"
-
-            # parse output for summary report
-            if ("| " in line and
-                    "| action" not in line and
-                    "| Starting" not in line and
-                    "| Completed" not in line and
-                    "| ITER" not in line and
-                    "|   " not in line and
-                    "| total" not in line):
-                nb_tests += 1
-            elif "| total" in line:
-                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
-                try:
-                    success += float(percentage)
-                except ValueError:
-                    LOGGER.info('Percentage error: %s, %s',
-                                percentage, line)
-                nb_totals += 1
-            elif "Full duration" in line:
-                duration = line.split(': ')[1]
-                try:
-                    overall_duration += float(duration)
-                except ValueError:
-                    LOGGER.info('Duration error: %s, %s', duration, line)
-
-        overall_duration = "{:10.2f}".format(overall_duration)
-        if nb_totals == 0:
-            success_avg = 0
-        else:
-            success_avg = "{:0.2f}".format(success / nb_totals)
+                    for result in workload.get('data') or []:
+                        if not result.get('error'):
+                            nb_success += 1
 
         scenario_summary = {'test_name': test_name,
                             'overall_duration': overall_duration,
                             'nb_tests': nb_tests,
-                            'success': success_avg}
+                            'nb_success': nb_success,
+                            'task_status': self.task_succeed(json_raw)}
         self.summary.append(scenario_summary)
 
-        LOGGER.debug("\n" + result)
-
-        return result
-
     def _prepare_env(self):
+        """Create resources needed by test scenarios."""
+        assert self.cloud
         LOGGER.debug('Validating the test name...')
         if self.test_name not in self.TESTS:
             raise Exception("Test name '%s' is invalid" % self.test_name)
 
-        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
-        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
-        router_name = self.RALLY_ROUTER_NAME + self.guid
-        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
-        self.flavor_name = self.FLAVOR_NAME + self.guid
-        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
-        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
-        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)
-
-        LOGGER.debug("Creating image '%s'...", self.image_name)
-        image_creator = deploy_utils.create_image(
-            self.os_creds, ImageConfig(
-                name=self.image_name,
-                image_file=self.GLANCE_IMAGE_PATH,
-                img_format=self.GLANCE_IMAGE_FORMAT,
-                image_user=self.GLANCE_IMAGE_USERNAME,
-                public=True,
-                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
-        if image_creator is None:
-            raise Exception("Failed to create image")
-        self.creators.append(image_creator)
-
-        LOGGER.debug("Creating network '%s'...", network_name)
-        network_creator = deploy_utils.create_network(
-            self.os_creds, NetworkConfig(
-                name=network_name,
-                shared=True,
-                subnet_settings=[SubnetConfig(
-                    name=subnet_name,
-                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
-                ]))
-        if network_creator is None:
-            raise Exception("Failed to create private network")
-        self.priv_net_id = network_creator.get_network().id
-        self.creators.append(network_creator)
-
-        LOGGER.debug("Creating router '%s'...", router_name)
-        router_creator = deploy_utils.create_router(
-            self.os_creds, RouterConfig(
-                name=router_name,
-                external_gateway=self.ext_net_name,
-                internal_subnets=[subnet_name]))
-        if router_creator is None:
-            raise Exception("Failed to create router")
-        self.creators.append(router_creator)
-
-        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
-        flavor_creator = OpenStackFlavor(
-            self.os_creds, FlavorConfig(
-                name=self.flavor_name, ram=512, disk=1, vcpus=1,
-                metadata=self.FLAVOR_EXTRA_SPECS))
-        if flavor_creator is None or flavor_creator.create() is None:
-            raise Exception("Failed to create flavor")
-        self.creators.append(flavor_creator)
-
-        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
-        flavor_alt_creator = OpenStackFlavor(
-            self.os_creds, FlavorConfig(
-                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
-                metadata=self.FLAVOR_EXTRA_SPECS))
-        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
-            raise Exception("Failed to create flavor")
-        self.creators.append(flavor_alt_creator)
+        self.compute_cnt = len(self.cloud.list_hypervisors())
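+        # create_flavor_alt() is a helper inherited from the singlevm base
+        # class; the flavor is deleted again in clean()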
+        self.flavor_alt = self.create_flavor_alt()
+        LOGGER.debug("flavor: %s", self.flavor_alt)
 
     def _run_tests(self):
+        """Execute tests."""
         if self.test_name == 'all':
             for test in self.TESTS:
                 if test == 'all' or test == 'vm':
@@ -539,87 +399,83 @@ class RallyBase(testcase.TestCase):
             self._run_task(self.test_name)
 
     def _generate_report(self):
-        report = (
-            "\n"
-            "                                                              "
-            "\n"
-            "                     Rally Summary Report\n"
-            "\n"
-            "+===================+============+===============+===========+"
-            "\n"
-            "| Module            | Duration   | nb. Test Run  | Success   |"
-            "\n"
-            "+===================+============+===============+===========+"
-            "\n")
+        """Generate test execution summary report."""
+        total_duration = 0.0
+        total_nb_tests = 0
+        total_nb_success = 0
+        nb_modules = 0
         payload = []
 
+        res_table = prettytable.PrettyTable(
+            padding_width=2,
+            field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
+        res_table.align['Module'] = "l"
+        res_table.align['Duration'] = "r"
+        res_table.align['Success'] = "r"
+
         # for each scenario we draw a row for the table
-        total_duration = 0.0
-        total_nb_tests = 0
-        total_success = 0.0
         for item in self.summary:
-            name = "{0:<17}".format(item['test_name'])
-            duration = float(item['overall_duration'])
-            total_duration += duration
-            duration = time.strftime("%M:%S", time.gmtime(duration))
-            duration = "{0:<10}".format(duration)
-            nb_tests = "{0:<13}".format(item['nb_tests'])
-            total_nb_tests += int(item['nb_tests'])
-            success = "{0:<10}".format(str(item['success']) + '%')
-            total_success += float(item['success'])
-            report += ("" +
-                       "| " + name + " | " + duration + " | " +
-                       nb_tests + " | " + success + "|\n" +
-                       "+-------------------+------------"
-                       "+---------------+-----------+\n")
-            payload.append({'module': name,
+            if item['task_status'] is True:
+                nb_modules += 1
+            total_duration += item['overall_duration']
+            total_nb_tests += item['nb_tests']
+            total_nb_success += item['nb_success']
+            try:
+                success_avg = 100 * item['nb_success'] / item['nb_tests']
+            except ZeroDivisionError:
+                success_avg = 0
+            success_str = "{:0.2f}%".format(success_avg)
+            duration_str = time.strftime("%M:%S",
+                                         time.gmtime(item['overall_duration']))
+            res_table.add_row([item['test_name'], duration_str,
+                               item['nb_tests'], success_str])
+            payload.append({'module': item['test_name'],
                             'details': {'duration': item['overall_duration'],
                                         'nb tests': item['nb_tests'],
-                                        'success': item['success']}})
+                                        'success': success_str}})
 
         total_duration_str = time.strftime("%H:%M:%S",
                                            time.gmtime(total_duration))
-        total_duration_str2 = "{0:<10}".format(total_duration_str)
-        total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
         try:
-            self.result = total_success / len(self.summary)
+            self.result = 100 * total_nb_success / total_nb_tests
         except ZeroDivisionError:
             self.result = 100
-
         success_rate = "{:0.2f}".format(self.result)
-        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
-        report += ("+===================+============"
-                   "+===============+===========+")
-        report += "\n"
-        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
-                   total_nb_tests_str + " | " + success_rate_str + "|\n")
-        report += ("+===================+============"
-                   "+===============+===========+")
-        report += "\n"
-
-        LOGGER.info("\n" + report)
+        success_rate_str = success_rate + '%'
+        res_table.add_row(["", "", "", ""])
+        res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
+                           success_rate_str])
+
+        LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
+        LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
+                    self.case_name, success_rate, nb_modules,
+                    len(self.summary))
         payload.append({'summary': {'duration': total_duration,
                                     'nb tests': total_nb_tests,
                                     'nb success': success_rate}})
-
         self.details = payload
 
-        LOGGER.info("Rally '%s' success_rate is %s%%",
-                    self.case_name, success_rate)
+    def clean(self):
+        """Cleanup of OpenStack resources. Should be called on completion."""
+        if self.flavor_alt:
+            self.orig_cloud.delete_flavor(self.flavor_alt.id)
+        super(RallyBase, self).clean()
 
-    def _clean_up(self):
-        for creator in reversed(self.creators):
-            try:
-                creator.clean()
-            except Exception as e:
-                LOGGER.error('Unexpected error cleaning - %s', e)
+    def is_successful(self):
+        """The overall result of the test."""
+        for item in self.summary:
+            if item['task_status'] is False:
+                return testcase.TestCase.EX_TESTCASE_FAILED
+
+        return super(RallyBase, self).is_successful()
 
     @energy.enable_recording
     def run(self, **kwargs):
         """Run testcase."""
         self.start_time = time.time()
         try:
+            assert super(RallyBase, self).run(
+                **kwargs) == testcase.TestCase.EX_OK
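+            # super().run() is expected to create the image and network
+            # shared with Rally (singlevm.VmReady1 behaviour)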
             conf_utils.create_rally_deployment()
             self._prepare_env()
             self._run_tests()
@@ -627,10 +483,8 @@ class RallyBase(testcase.TestCase):
             res = testcase.TestCase.EX_OK
         except Exception as exc:   # pylint: disable=broad-except
             LOGGER.error('Error with run: %s', exc)
+            self.result = 0
             res = testcase.TestCase.EX_RUN_ERROR
-        finally:
-            self._clean_up()
-
         self.stop_time = time.time()
         return res