Fix pylint errors/warnings in rally
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
index 86ec355..2fd7d7f 100644
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 #
 # Copyright (c) 2015 All rights reserved
 # This program and the accompanying materials
@@ -8,6 +8,8 @@
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 
+"""Rally testcases implementation."""
+
 from __future__ import division
 
 import json
@@ -16,66 +18,104 @@ import os
 import re
 import subprocess
 import time
+import uuid
 
-import iniparse
+import pkg_resources
+import prettytable
 import yaml
 
 from functest.core import testcase
+from functest.energy import energy
+from functest.opnfv_tests.openstack.snaps import snaps_utils
+from functest.opnfv_tests.openstack.tempest import conf_utils
 from functest.utils.constants import CONST
-import functest.utils.openstack_utils as os_utils
 
-logger = logging.getLogger(__name__)
+from snaps.config.flavor import FlavorConfig
+from snaps.config.image import ImageConfig
+from snaps.config.network import NetworkConfig, SubnetConfig
+from snaps.config.router import RouterConfig
+
+from snaps.openstack.create_flavor import OpenStackFlavor
+from snaps.openstack.utils import deploy_utils
+
+LOGGER = logging.getLogger(__name__)
 
 
 class RallyBase(testcase.TestCase):
-    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
-             'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
+    """Base class for Rally testcases implementation."""
+
+    # pylint: disable=too-many-instance-attributes
+    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
     GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
     GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
     GLANCE_IMAGE_PATH = os.path.join(
         CONST.__getattribute__('dir_functest_images'),
         GLANCE_IMAGE_FILENAME)
     GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
-    FLAVOR_NAME = "m1.tiny"
-
-    RALLY_DIR = os.path.join(
-        CONST.__getattribute__('dir_repo_functest'),
-        CONST.__getattribute__('dir_rally'))
-    RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
-    TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
-    SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
+    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
+    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
+    if hasattr(CONST, 'openstack_extra_properties'):
+        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
+            'openstack_extra_properties')
+    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
+    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
+    FLAVOR_EXTRA_SPECS = None
+    FLAVOR_RAM = 512
+    FLAVOR_RAM_ALT = 1024
+    if hasattr(CONST, 'flavor_extra_specs'):
+        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')
+        FLAVOR_RAM = 1024
+        FLAVOR_RAM_ALT = 2048
+
+    RALLY_DIR = pkg_resources.resource_filename(
+        'functest', 'opnfv_tests/openstack/rally')
+    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
+        'functest', 'opnfv_tests/openstack/rally/scenario')
+    TEMPLATE_DIR = pkg_resources.resource_filename(
+        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
+    SUPPORT_DIR = pkg_resources.resource_filename(
+        'functest', 'opnfv_tests/openstack/rally/scenario/support')
     USERS_AMOUNT = 2
     TENANTS_AMOUNT = 3
     ITERATIONS_AMOUNT = 10
     CONCURRENCY = 4
     RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
-    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
-                                     'tempest/tempest.conf')
     BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
     TEMP_DIR = os.path.join(RALLY_DIR, "var")
 
-    CINDER_VOLUME_TYPE_NAME = "volume_test"
     RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
     RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
     RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
     RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
 
     def __init__(self, **kwargs):
+        """Initialize RallyBase object."""
         super(RallyBase, self).__init__(**kwargs)
+        self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials()
+        self.guid = '-' + str(uuid.uuid4())
+        self.creators = []
         self.mode = ''
         self.summary = []
         self.scenario_dir = ''
-        self.nova_client = os_utils.get_nova_client()
-        self.neutron_client = os_utils.get_neutron_client()
-        self.cinder_client = os_utils.get_cinder_client()
-        self.network_dict = {}
-        self.volume_type = None
+        self.image_name = None
+        self.ext_net_name = None
+        self.priv_net_id = None
+        self.flavor_name = None
+        self.flavor_alt_name = None
         self.smoke = None
+        self.test_name = None
+        self.start_time = None
+        self.result = None
+        self.details = None
+        self.compute_cnt = 0
 
     def _build_task_args(self, test_file_name):
+        """Build arguments for the Rally task."""
         task_args = {'service_list': [test_file_name]}
-        task_args['image_name'] = self.GLANCE_IMAGE_NAME
-        task_args['flavor_name'] = self.FLAVOR_NAME
+        task_args['image_name'] = self.image_name
+        task_args['flavor_name'] = self.flavor_name
+        task_args['flavor_alt_name'] = self.flavor_alt_name
         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
         task_args['tmpl_dir'] = self.TEMPLATE_DIR
@@ -87,24 +127,22 @@ class RallyBase(testcase.TestCase):
         task_args['concurrency'] = self.CONCURRENCY
         task_args['smoke'] = self.smoke
 
-        ext_net = os_utils.get_external_net(self.neutron_client)
+        ext_net = self.ext_net_name
         if ext_net:
             task_args['floating_network'] = str(ext_net)
         else:
             task_args['floating_network'] = ''
 
-        net_id = self.network_dict['net_id']
+        net_id = self.priv_net_id
         if net_id:
             task_args['netid'] = str(net_id)
         else:
             task_args['netid'] = ''
 
-        # get keystone auth endpoint
-        task_args['request_url'] = CONST.__getattribute__('OS_AUTH_URL') or ''
-
         return task_args
 
     def _prepare_test_list(self, test_name):
+        """Build the list of test cases to be executed."""
         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                           test_yaml_file_name)
@@ -117,19 +155,20 @@ class RallyBase(testcase.TestCase):
                 raise Exception("The scenario '%s' does not exist."
                                 % scenario_file_name)
 
-        logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
+        LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
 
         if not os.path.exists(self.TEMP_DIR):
             os.makedirs(self.TEMP_DIR)
 
-        self.apply_blacklist(scenario_file_name, test_file_name)
+        self._apply_blacklist(scenario_file_name, test_file_name)
         return test_file_name
 
     @staticmethod
     def get_task_id(cmd_raw):
         """
-        get task id from command rally result
+        Get task id from rally command output.
+
         :param cmd_raw:
         :return: task_id as string
         """
@@ -144,7 +183,8 @@ class RallyBase(testcase.TestCase):
     @staticmethod
     def task_succeed(json_raw):
         """
-        Parse JSON from rally JSON results
+        Parse rally JSON results and tell whether the task succeeded.
+
         :param json_raw:
         :return: Bool
         """
@@ -159,28 +199,24 @@ class RallyBase(testcase.TestCase):
 
         return True
 
-    @staticmethod
-    def live_migration_supported():
-        config = iniparse.ConfigParser()
-        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
-                config.has_section('compute-feature-enabled') and
-                config.has_option('compute-feature-enabled',
-                                  'live_migration')):
-            return config.getboolean('compute-feature-enabled',
-                                     'live_migration')
+    def _migration_supported(self):
+        """Determine if migration is supported."""
+        if self.compute_cnt > 1:
+            return True
 
         return False
 
     @staticmethod
     def get_cmd_output(proc):
+        """Get command stdout."""
         result = ""
-        while proc.poll() is None:
-            line = proc.stdout.readline()
+        for line in proc.stdout:
             result += line
         return result
 
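For context, get_cmd_output() drains the child's stdout by iterating over the pipe until EOF. A minimal standalone sketch of the same pattern (the echoed text and the universal_newlines flag are illustrative additions, not part of this change):

    import subprocess

    proc = subprocess.Popen(['echo', 'hello rally'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            universal_newlines=True)  # text-mode pipe
    output = ''
    for line in proc.stdout:  # yields lines until the child closes stdout
        output += line
    proc.wait()               # reap the child process
    print(output)
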
     @staticmethod
     def excl_scenario():
+        """Return the tests to exclude for the current deploy scenario."""
         black_tests = []
         try:
             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
@@ -188,22 +224,44 @@ class RallyBase(testcase.TestCase):
 
             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
-            if (bool(installer_type) * bool(deploy_scenario)):
-                if 'scenario' in black_list_yaml.keys():
-                    for item in black_list_yaml['scenario']:
-                        scenarios = item['scenarios']
-                        installers = item['installers']
-                        if (deploy_scenario in scenarios and
-                                installer_type in installers):
-                            tests = item['tests']
-                            black_tests.extend(tests)
-        except Exception:
-            logger.debug("Scenario exclusion not applied.")
+            if (bool(installer_type) and bool(deploy_scenario) and
+                    'scenario' in black_list_yaml.keys()):
+                for item in black_list_yaml['scenario']:
+                    scenarios = item['scenarios']
+                    installers = item['installers']
+                    in_it = RallyBase.in_iterable_re
+                    if (in_it(deploy_scenario, scenarios) and
+                            in_it(installer_type, installers)):
+                        tests = item['tests']
+                        black_tests.extend(tests)
+        except Exception:  # pylint: disable=broad-except
+            LOGGER.debug("Scenario exclusion not applied.")
 
         return black_tests
 
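For reference, the structure of blacklist.txt as read by excl_scenario() above and excl_func() below, shown as the parsed Python object (all names and patterns are hypothetical):

    # Hypothetical parsed blacklist.txt; keys mirror what the code reads.
    black_list_yaml = {
        'scenario': [{
            'scenarios': ['^os-odl.*-ha$'],   # exact names or regex patterns
            'installers': ['fuel'],
            'tests': ['NovaServers.boot_and_live_migrate_server'],
        }],
        'functionality': [{
            'functions': ['no_migration'],    # matched against func_list
            'tests': ['NovaServers.boot_and_live_migrate_server'],
        }],
    }
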
     @staticmethod
-    def excl_func():
+    def in_iterable_re(needle, haystack):
+        """
+        Check if given needle is in the iterable haystack, using regex.
+
+        :param needle: string to be matched
+        :param haystack: iterable of strings (optionally regex patterns)
+        :return: True if needle is equal to any of the elements in haystack,
+                 or if a nonempty regex pattern in haystack is found in needle.
+        """
+        # match without regex
+        if needle in haystack:
+            return True
+
+        for pattern in haystack:
+            # match if regex pattern is set and found in the needle
+            if pattern and re.search(pattern, needle) is not None:
+                return True
+
+        return False
+
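A quick illustration of the matching semantics above, assuming RallyBase is importable (the deploy scenario names and patterns are hypothetical):

    # Exact membership is checked first, then every entry is tried as a regex.
    RallyBase.in_iterable_re('os-nosdn-nofeature-ha',
                             ['os-nosdn-nofeature-ha'])  # True: exact match
    RallyBase.in_iterable_re('os-odl_l2-bgpvpn-ha',
                             ['^os-.*-bgpvpn-.*$'])      # True: regex match
    RallyBase.in_iterable_re('os-nosdn-nofeature-ha',
                             [''])                       # False: empty patterns
                                                         # are skipped
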
+    def excl_func(self):
+        """Return the tests to exclude for unsupported functionalities."""
         black_tests = []
         func_list = []
 
@@ -211,8 +269,8 @@ class RallyBase(testcase.TestCase):
             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                 black_list_yaml = yaml.safe_load(black_list_file)
 
-            if not RallyBase.live_migration_supported():
-                func_list.append("no_live_migration")
+            if not self._migration_supported():
+                func_list.append("no_migration")
 
             if 'functionality' in black_list_yaml.keys():
                 for item in black_list_yaml['functionality']:
@@ -221,19 +279,22 @@ class RallyBase(testcase.TestCase):
                         if func in functions:
                             tests = item['tests']
                             black_tests.extend(tests)
-        except Exception:
-            logger.debug("Functionality exclusion not applied.")
+        except Exception:  # pylint: disable=broad-except
+            LOGGER.debug("Functionality exclusion not applied.")
 
         return black_tests
 
-    @staticmethod
-    def apply_blacklist(case_file_name, result_file_name):
-        logger.debug("Applying blacklist...")
+    def _apply_blacklist(self, case_file_name, result_file_name):
+        """Apply blacklist."""
+        LOGGER.debug("Applying blacklist...")
         cases_file = open(case_file_name, 'r')
         result_file = open(result_file_name, 'w')
 
-        black_tests = list(set(RallyBase.excl_func() +
-                           RallyBase.excl_scenario()))
+        black_tests = list(set(self.excl_func() +
+                               self.excl_scenario()))
+
+        if black_tests:
+            LOGGER.debug("Blacklisted tests: %s", black_tests)
 
         include = True
         for cases_line in cases_file:
@@ -254,292 +315,298 @@ class RallyBase(testcase.TestCase):
 
     @staticmethod
     def file_is_empty(file_name):
+        """Determine if a file is empty."""
         try:
             if os.stat(file_name).st_size > 0:
                 return False
-        except:
+        except Exception:  # pylint: disable=broad-except
             pass
 
         return True
 
     def _run_task(self, test_name):
-        logger.info('Starting test scenario "{}" ...'.format(test_name))
+        """Run a task."""
+        LOGGER.info('Starting test scenario "%s" ...', test_name)
 
         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
         if not os.path.exists(task_file):
-            logger.error("Task file '%s' does not exist." % task_file)
-            raise Exception("Task file '%s' does not exist." % task_file)
+            LOGGER.error("Task file '%s' does not exist.", task_file)
+            raise Exception("Task file '%s' does not exist." % task_file)
 
         file_name = self._prepare_test_list(test_name)
         if self.file_is_empty(file_name):
-            logger.info('No tests for scenario "{}"'.format(test_name))
+            LOGGER.info('No tests for scenario "%s"', test_name)
             return
 
-        cmd_line = ("rally task start --abort-on-sla-failure "
-                    "--task {0} "
-                    "--task-args \"{1}\""
-                    .format(task_file, self._build_task_args(test_name)))
-        logger.debug('running command line: {}'.format(cmd_line))
+        cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
+                task_file, "--task-args",
+                str(self._build_task_args(test_name))])
+        LOGGER.debug('running command: %s', cmd)
 
-        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
-                             stderr=subprocess.STDOUT, shell=True)
-        output = self._get_output(p, test_name)
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT)
+        output = self.get_cmd_output(proc)
         task_id = self.get_task_id(output)
-        logger.debug('task_id : {}'.format(task_id))
+
+        LOGGER.debug('task_id : %s', task_id)
 
         if task_id is None:
-            logger.error('Failed to retrieve task_id, validating task...')
-            cmd_line = ("rally task validate "
-                        "--task {0} "
-                        "--task-args \"{1}\""
-                        .format(task_file, self._build_task_args(test_name)))
-            logger.debug('running command line: {}'.format(cmd_line))
-            p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
-                                 stderr=subprocess.STDOUT, shell=True)
-            output = self.get_cmd_output(p)
-            logger.error("Task validation result:" + "\n" + output)
+            LOGGER.error('Failed to retrieve task_id, validating task...')
+            cmd = (["rally", "task", "validate", "--task", task_file,
+                    "--task-args", str(self._build_task_args(test_name))])
+            LOGGER.debug('running command: %s', cmd)
+            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                    stderr=subprocess.STDOUT)
+            output = self.get_cmd_output(proc)
+            LOGGER.error("Task validation result:\n%s", output)
             return
 
         # check for result directory and create it otherwise
         if not os.path.exists(self.RESULTS_DIR):
-            logger.debug('{} does not exist, we create it.'
-                         .format(self.RESULTS_DIR))
+            LOGGER.debug('%s does not exist, we create it.',
+                         self.RESULTS_DIR)
             os.makedirs(self.RESULTS_DIR)
 
-        # write html report file
-        report_html_name = 'opnfv-{}.html'.format(test_name)
-        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
-        cmd_line = "rally task report {} --out {}".format(task_id,
-                                                          report_html_dir)
-
-        logger.debug('running command line: {}'.format(cmd_line))
-        os.popen(cmd_line)
-
         # get and save rally operation JSON result
-        cmd_line = "rally task results %s" % task_id
-        logger.debug('running command line: {}'.format(cmd_line))
-        cmd = os.popen(cmd_line)
-        json_results = cmd.read()
+        cmd = (["rally", "task", "detailed", task_id])
+        LOGGER.debug('running command: %s', cmd)
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT)
+        json_detailed = self.get_cmd_output(proc)
+        LOGGER.info('%s', json_detailed)
+
+        cmd = (["rally", "task", "results", task_id])
+        LOGGER.debug('running command: %s', cmd)
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                                stderr=subprocess.STDOUT)
+        json_results = self.get_cmd_output(proc)
+        self._append_summary(json_results, test_name)
         report_json_name = 'opnfv-{}.json'.format(test_name)
         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
-        with open(report_json_dir, 'w') as f:
-            logger.debug('saving json file')
-            f.write(json_results)
+        with open(report_json_dir, 'w') as r_file:
+            LOGGER.debug('saving json file')
+            r_file.write(json_results)
+
+        # write html report file
+        report_html_name = 'opnfv-{}.html'.format(test_name)
+        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+        cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
+        LOGGER.debug('running command: %s', cmd)
+        subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         stderr=subprocess.STDOUT)
 
-        """ parse JSON operation result """
+        # parse JSON operation result
         if self.task_succeed(json_results):
-            logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
+            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
         else:
-            logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
+            LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
 
-    def _get_output(self, proc, test_name):
-        result = ""
+    def _append_summary(self, json_raw, test_name):
+        """Update statistics summary info."""
         nb_tests = 0
+        nb_success = 0
         overall_duration = 0.0
-        success = 0.0
-        nb_totals = 0
-
-        while proc.poll() is None:
-            line = proc.stdout.readline()
-            if ("Load duration" in line or
-                    "started" in line or
-                    "finished" in line or
-                    " Preparing" in line or
-                    "+-" in line or
-                    "|" in line):
-                result += line
-            elif "test scenario" in line:
-                result += "\n" + line
-            elif "Full duration" in line:
-                result += line + "\n\n"
-
-            # parse output for summary report
-            if ("| " in line and
-                    "| action" not in line and
-                    "| Starting" not in line and
-                    "| Completed" not in line and
-                    "| ITER" not in line and
-                    "|   " not in line and
-                    "| total" not in line):
-                nb_tests += 1
-            elif "| total" in line:
-                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
-                try:
-                    success += float(percentage)
-                except ValueError:
-                    logger.info('Percentage error: %s, %s' %
-                                (percentage, line))
-                nb_totals += 1
-            elif "Full duration" in line:
-                duration = line.split(': ')[1]
-                try:
-                    overall_duration += float(duration)
-                except ValueError:
-                    logger.info('Duration error: %s, %s' % (duration, line))
-
-        overall_duration = "{:10.2f}".format(overall_duration)
-        if nb_totals == 0:
-            success_avg = 0
-        else:
-            success_avg = "{:0.2f}".format(success / nb_totals)
+
+        rally_report = json.loads(json_raw)
+        for report in rally_report:
+            if report.get('full_duration'):
+                overall_duration += report.get('full_duration')
+
+            if report.get('result'):
+                for result in report.get('result'):
+                    nb_tests += 1
+                    if not result.get('error'):
+                        nb_success += 1
 
         scenario_summary = {'test_name': test_name,
                             'overall_duration': overall_duration,
                             'nb_tests': nb_tests,
-                            'success': success_avg}
+                            'nb_success': nb_success}
         self.summary.append(scenario_summary)
 
-        logger.debug("\n" + result)
-
-        return result
-
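For reference, a sketch of the `rally task results` JSON that _append_summary() consumes, as a parsed Python object (field values are hypothetical); a result entry with an empty 'error' list is counted as a success:

    rally_report = [
        {
            'full_duration': 12.34,        # seconds, summed per scenario
            'result': [
                {'error': []},             # counted as a success
                {'error': ['Traceback']},  # counted as a failure
            ],
        },
    ]
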
     def _prepare_env(self):
-        logger.debug('Validating the test name...')
-        if not (self.test_name in self.TESTS):
+        """Create resources needed by test scenarios."""
+        LOGGER.debug('Validating the test name...')
+        if self.test_name not in self.TESTS:
             raise Exception("Test name '%s' is invalid" % self.test_name)
 
-        volume_types = os_utils.list_volume_types(self.cinder_client,
-                                                  private=False)
-        if volume_types:
-            logger.debug("Using existing volume type(s)...")
-        else:
-            logger.debug('Creating volume type...')
-            self.volume_type = os_utils.create_volume_type(
-                self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
-            if self.volume_type is None:
-                raise Exception("Failed to create volume type '%s'" %
-                                self.CINDER_VOLUME_TYPE_NAME)
-            logger.debug("Volume type '%s' is created succesfully." %
-                         self.CINDER_VOLUME_TYPE_NAME)
-
-        logger.debug('Getting or creating image...')
-        self.image_exists, self.image_id = os_utils.get_or_create_image(
-            self.GLANCE_IMAGE_NAME,
-            self.GLANCE_IMAGE_PATH,
-            self.GLANCE_IMAGE_FORMAT)
-        if self.image_id is None:
-            raise Exception("Failed to get or create image '%s'" %
-                            self.GLANCE_IMAGE_NAME)
-
-        logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
-        self.network_dict = os_utils.create_shared_network_full(
-            self.RALLY_PRIVATE_NET_NAME,
-            self.RALLY_PRIVATE_SUBNET_NAME,
-            self.RALLY_ROUTER_NAME,
-            self.RALLY_PRIVATE_SUBNET_CIDR)
-        if self.network_dict is None:
-            raise Exception("Failed to create shared network '%s'" %
-                            self.RALLY_PRIVATE_NET_NAME)
+        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
+        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
+        router_name = self.RALLY_ROUTER_NAME + self.guid
+        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
+        self.flavor_name = self.FLAVOR_NAME + self.guid
+        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
+        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
+        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)
+
+        LOGGER.debug("Creating image '%s'...", self.image_name)
+        image_creator = deploy_utils.create_image(
+            self.os_creds, ImageConfig(
+                name=self.image_name,
+                image_file=self.GLANCE_IMAGE_PATH,
+                img_format=self.GLANCE_IMAGE_FORMAT,
+                image_user=self.GLANCE_IMAGE_USERNAME,
+                public=True,
+                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
+        if image_creator is None:
+            raise Exception("Failed to create image")
+        self.creators.append(image_creator)
+
+        LOGGER.debug("Creating network '%s'...", network_name)
+
+        rally_network_type = None
+        rally_physical_network = None
+        rally_segmentation_id = None
+
+        if hasattr(CONST, 'rally_network_type'):
+            rally_network_type = CONST.__getattribute__(
+                'rally_network_type')
+        if hasattr(CONST, 'rally_physical_network'):
+            rally_physical_network = CONST.__getattribute__(
+                'rally_physical_network')
+        if hasattr(CONST, 'rally_segmentation_id'):
+            rally_segmentation_id = CONST.__getattribute__(
+                'rally_segmentation_id')
+
+        network_creator = deploy_utils.create_network(
+            self.os_creds, NetworkConfig(
+                name=network_name,
+                shared=True,
+                network_type=rally_network_type,
+                physical_network=rally_physical_network,
+                segmentation_id=rally_segmentation_id,
+                subnet_settings=[SubnetConfig(
+                    name=subnet_name,
+                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)]))
+        if network_creator is None:
+            raise Exception("Failed to create private network")
+        self.priv_net_id = network_creator.get_network().id
+        self.creators.append(network_creator)
+
+        LOGGER.debug("Creating router '%s'...", router_name)
+        router_creator = deploy_utils.create_router(
+            self.os_creds, RouterConfig(
+                name=router_name,
+                external_gateway=self.ext_net_name,
+                internal_subnets=[subnet_name]))
+        if router_creator is None:
+            raise Exception("Failed to create router")
+        self.creators.append(router_creator)
+
+        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
+        flavor_creator = OpenStackFlavor(
+            self.os_creds, FlavorConfig(
+                name=self.flavor_name, ram=self.FLAVOR_RAM, disk=1, vcpus=1,
+                metadata=self.FLAVOR_EXTRA_SPECS))
+        if flavor_creator is None or flavor_creator.create() is None:
+            raise Exception("Failed to create flavor")
+        self.creators.append(flavor_creator)
+
+        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
+        flavor_alt_creator = OpenStackFlavor(
+            self.os_creds, FlavorConfig(
+                name=self.flavor_alt_name, ram=self.FLAVOR_RAM_ALT, disk=1,
+                vcpus=1, metadata=self.FLAVOR_EXTRA_SPECS))
+        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
+            raise Exception("Failed to create alternate flavor")
+        self.creators.append(flavor_alt_creator)
 
     def _run_tests(self):
+        """Execute tests."""
         if self.test_name == 'all':
             for test in self.TESTS:
-                if (test == 'all' or test == 'vm'):
+                if test in ('all', 'vm'):
                     continue
                 self._run_task(test)
         else:
             self._run_task(self.test_name)
 
     def _generate_report(self):
-        report = (
-            "\n"
-            "                                                              "
-            "\n"
-            "                     Rally Summary Report\n"
-            "\n"
-            "+===================+============+===============+===========+"
-            "\n"
-            "| Module            | Duration   | nb. Test Run  | Success   |"
-            "\n"
-            "+===================+============+===============+===========+"
-            "\n")
+        """Generate test execution summary report."""
+        total_duration = 0.0
+        total_nb_tests = 0
+        total_nb_success = 0
         payload = []
 
+        res_table = prettytable.PrettyTable(
+            padding_width=2,
+            field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
+        res_table.align['Module'] = "l"
+        res_table.align['Duration'] = "r"
+        res_table.align['Success'] = "r"
+
         # for each scenario we draw a row for the table
-        total_duration = 0.0
-        total_nb_tests = 0
-        total_success = 0.0
-        for s in self.summary:
-            name = "{0:<17}".format(s['test_name'])
-            duration = float(s['overall_duration'])
-            total_duration += duration
-            duration = time.strftime("%M:%S", time.gmtime(duration))
-            duration = "{0:<10}".format(duration)
-            nb_tests = "{0:<13}".format(s['nb_tests'])
-            total_nb_tests += int(s['nb_tests'])
-            success = "{0:<10}".format(str(s['success']) + '%')
-            total_success += float(s['success'])
-            report += ("" +
-                       "| " + name + " | " + duration + " | " +
-                       nb_tests + " | " + success + "|\n" +
-                       "+-------------------+------------"
-                       "+---------------+-----------+\n")
-            payload.append({'module': name,
-                            'details': {'duration': s['overall_duration'],
-                                        'nb tests': s['nb_tests'],
-                                        'success': s['success']}})
+        for item in self.summary:
+            total_duration += item['overall_duration']
+            total_nb_tests += item['nb_tests']
+            total_nb_success += item['nb_success']
+            try:
+                success_avg = 100 * item['nb_success'] / item['nb_tests']
+            except ZeroDivisionError:
+                success_avg = 0
+            success_str = "{:0.2f}%".format(success_avg)
+            duration_str = time.strftime("%M:%S",
+                                         time.gmtime(item['overall_duration']))
+            res_table.add_row([item['test_name'], duration_str,
+                               item['nb_tests'], success_str])
+            payload.append({'module': item['test_name'],
+                            'details': {'duration': item['overall_duration'],
+                                        'nb tests': item['nb_tests'],
+                                        'success': success_str}})
 
         total_duration_str = time.strftime("%H:%M:%S",
                                            time.gmtime(total_duration))
-        total_duration_str2 = "{0:<10}".format(total_duration_str)
-        total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
         try:
-            self.result = total_success / len(self.summary)
+            self.result = 100 * total_nb_success / total_nb_tests
         except ZeroDivisionError:
             self.result = 100
-
         success_rate = "{:0.2f}".format(self.result)
-        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
-        report += ("+===================+============"
-                   "+===============+===========+")
-        report += "\n"
-        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
-                   total_nb_tests_str + " | " + success_rate_str + "|\n")
-        report += ("+===================+============"
-                   "+===============+===========+")
-        report += "\n"
-
-        logger.info("\n" + report)
+        success_rate_str = success_rate + '%'
+        res_table.add_row(["", "", "", ""])
+        res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
+                           success_rate_str])
+
+        LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
+        LOGGER.info("Rally '%s' success_rate is %s%%",
+                    self.case_name, success_rate)
         payload.append({'summary': {'duration': total_duration,
                                     'nb tests': total_nb_tests,
                                     'nb success': success_rate}})
-
         self.details = payload
 
-        logger.info("Rally '%s' success_rate is %s%%"
-                    % (self.case_name, success_rate))
-
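The summary table is rendered with prettytable, newly imported at the top of the module; a minimal standalone sketch of the calls used above, with hypothetical rows:

    import prettytable

    res_table = prettytable.PrettyTable(
        padding_width=2,
        field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
    res_table.align['Module'] = 'l'    # left-align module names
    res_table.align['Duration'] = 'r'  # right-align numeric columns
    res_table.align['Success'] = 'r'
    res_table.add_row(['authenticate', '00:57', 10, '100.00%'])
    res_table.add_row(['TOTAL:', '00:00:57', 10, '100.00%'])
    print(res_table.get_string())
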
     def _clean_up(self):
-        if self.volume_type:
-            logger.debug("Deleting volume type '%s'..." % self.volume_type)
-            os_utils.delete_volume_type(self.cinder_client, self.volume_type)
-
-        if not self.image_exists:
-            logger.debug("Deleting image '%s' with ID '%s'..."
-                         % (self.GLANCE_IMAGE_NAME, self.image_id))
-            if not os_utils.delete_glance_image(self.nova_client,
-                                                self.image_id):
-                logger.error("Error deleting the glance image")
-
-    def run(self):
+        """Cleanup all OpenStack objects. Should be called on completion."""
+        for creator in reversed(self.creators):
+            try:
+                creator.clean()
+            except Exception as exc:  # pylint: disable=broad-except
+                LOGGER.error('Unexpected error cleaning - %s', exc)
+
+    @energy.enable_recording
+    def run(self, **kwargs):
+        """Run testcase."""
         self.start_time = time.time()
         try:
+            conf_utils.create_rally_deployment()
             self._prepare_env()
             self._run_tests()
             self._generate_report()
-            self._clean_up()
             res = testcase.TestCase.EX_OK
-        except Exception as e:
-            logger.error('Error with run: %s' % e)
+        except Exception as exc:   # pylint: disable=broad-except
+            LOGGER.error('Error with run: %s', exc)
             res = testcase.TestCase.EX_RUN_ERROR
+        finally:
+            self._clean_up()
 
         self.stop_time = time.time()
         return res
 
 
 class RallySanity(RallyBase):
+    """Rally sanity testcase implementation."""
+
     def __init__(self, **kwargs):
+        """Initialize RallySanity object."""
         if "case_name" not in kwargs:
             kwargs["case_name"] = "rally_sanity"
         super(RallySanity, self).__init__(**kwargs)
@@ -550,7 +617,10 @@ class RallySanity(RallyBase):
 
 
 class RallyFull(RallyBase):
+    """Rally full testcase implementation."""
+
     def __init__(self, **kwargs):
+        """Initialize RallyFull object."""
         if "case_name" not in kwargs:
             kwargs["case_name"] = "rally_full"
         super(RallyFull, self).__init__(**kwargs)
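
As a usage sketch, assuming OpenStack credentials are resolvable by snaps_utils.get_credentials(), a testcase is driven through run():

    from functest.core import testcase
    from functest.opnfv_tests.openstack.rally import rally

    rally_sanity = rally.RallySanity()
    if rally_sanity.run() == testcase.TestCase.EX_OK:
        print(rally_sanity.details)  # per-module durations and success rates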