import time
import uuid
-import iniparse
import pkg_resources
+import prettytable
+from snaps.config.flavor import FlavorConfig
+from snaps.config.image import ImageConfig
+from snaps.config.network import NetworkConfig, SubnetConfig
+from snaps.config.router import RouterConfig
+from snaps.openstack.create_flavor import OpenStackFlavor
+from snaps.openstack.utils import deploy_utils
+from xtesting.core import testcase
+from xtesting.energy import energy
import yaml
-from functest.core import testcase
-from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
-from functest.utils.constants import CONST
-
-from snaps.openstack.create_image import ImageSettings
-from snaps.openstack.create_network import NetworkSettings, SubnetSettings
-from snaps.openstack.create_router import RouterSettings
-from snaps.openstack.tests import openstack_tests
-from snaps.openstack.utils import deploy_utils
+from functest.opnfv_tests.openstack.tempest import conf_utils
+from functest.utils import config
+from functest.utils import env
LOGGER = logging.getLogger(__name__)
class RallyBase(testcase.TestCase):
"""Base class form Rally testcases implementation."""
- TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
+ # pylint: disable=too-many-instance-attributes
+ TESTS = ['authenticate', 'glance', 'cinder', 'heat',
'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
- GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
- GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
- GLANCE_IMAGE_PATH = os.path.join(
- CONST.__getattribute__('dir_functest_images'),
- GLANCE_IMAGE_FILENAME)
- GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
- GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
- GLANCE_IMAGE_EXTRA_PROPERTIES = {}
- if hasattr(CONST, 'openstack_extra_properties'):
- GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
- 'openstack_extra_properties')
- FLAVOR_NAME = "m1.tiny"
+ GLANCE_IMAGE_NAME = getattr(config.CONF, 'openstack_image_name')
+ GLANCE_IMAGE_FILENAME = getattr(config.CONF, 'openstack_image_file_name')
+ GLANCE_IMAGE_PATH = os.path.join(getattr(
+ config.CONF, 'dir_functest_images'), GLANCE_IMAGE_FILENAME)
+ GLANCE_IMAGE_FORMAT = getattr(config.CONF, 'openstack_image_disk_format')
+ GLANCE_IMAGE_USERNAME = getattr(config.CONF, 'openstack_image_username')
+ GLANCE_IMAGE_EXTRA_PROPERTIES = getattr(
+ config.CONF, 'image_properties', {})
+ FLAVOR_NAME = getattr(config.CONF, 'rally_flavor_name')
+ FLAVOR_ALT_NAME = getattr(config.CONF, 'rally_flavor_alt_name')
+ FLAVOR_RAM = 512
+ FLAVOR_RAM_ALT = 1024
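+    # larger flavors are assumed necessary when extra specs (e.g. hugepages) are set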
+ FLAVOR_EXTRA_SPECS = getattr(config.CONF, 'flavor_extra_specs', None)
+ if FLAVOR_EXTRA_SPECS:
+ FLAVOR_RAM = 1024
+ FLAVOR_RAM_ALT = 2048
RALLY_DIR = pkg_resources.resource_filename(
'functest', 'opnfv_tests/openstack/rally')
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4
- RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
- TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
- 'tempest/tempest.conf')
+ RESULTS_DIR = os.path.join(getattr(config.CONF, 'dir_results'), 'rally')
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
- RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
- RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
- RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
- RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
+ RALLY_PRIVATE_NET_NAME = getattr(config.CONF, 'rally_network_name')
+ RALLY_PRIVATE_SUBNET_NAME = getattr(config.CONF, 'rally_subnet_name')
+ RALLY_PRIVATE_SUBNET_CIDR = getattr(config.CONF, 'rally_subnet_cidr')
+ RALLY_ROUTER_NAME = getattr(config.CONF, 'rally_router_name')
def __init__(self, **kwargs):
"""Initialize RallyBase object."""
super(RallyBase, self).__init__(**kwargs)
- if 'os_creds' in kwargs:
- self.os_creds = kwargs['os_creds']
- else:
- creds_override = None
- if hasattr(CONST, 'snaps_os_creds_override'):
- creds_override = CONST.__getattribute__(
- 'snaps_os_creds_override')
-
- self.os_creds = openstack_tests.get_credentials(
- os_env_file=CONST.__getattribute__('openstack_creds'),
- overrides=creds_override)
-
- self.guid = ''
- if CONST.__getattribute__('rally_unique_names'):
- self.guid = '-' + str(uuid.uuid4())
-
+ self.os_creds = kwargs.get('os_creds') or snaps_utils.get_credentials()
+ self.guid = '-' + str(uuid.uuid4())
self.creators = []
self.mode = ''
self.summary = []
self.scenario_dir = ''
+ self.image_name = None
self.ext_net_name = None
self.priv_net_id = None
+ self.flavor_name = None
+ self.flavor_alt_name = None
self.smoke = None
self.test_name = None
self.start_time = None
self.result = None
self.details = None
+ self.compute_cnt = 0
def _build_task_args(self, test_file_name):
+ """Build arguments for the Rally task."""
task_args = {'service_list': [test_file_name]}
- task_args['image_name'] = self.GLANCE_IMAGE_NAME
- task_args['flavor_name'] = self.FLAVOR_NAME
- task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
- task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
- task_args['tmpl_dir'] = self.TEMPLATE_DIR
- task_args['sup_dir'] = self.SUPPORT_DIR
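+        # cast to str so the serialized task args stay plain when passed on the CLI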
+ task_args['image_name'] = str(self.image_name)
+ task_args['flavor_name'] = str(self.flavor_name)
+ task_args['flavor_alt_name'] = str(self.flavor_alt_name)
+ task_args['glance_image_location'] = str(self.GLANCE_IMAGE_PATH)
+ task_args['glance_image_format'] = str(self.GLANCE_IMAGE_FORMAT)
+ task_args['tmpl_dir'] = str(self.TEMPLATE_DIR)
+ task_args['sup_dir'] = str(self.SUPPORT_DIR)
task_args['users_amount'] = self.USERS_AMOUNT
task_args['tenants_amount'] = self.TENANTS_AMOUNT
task_args['use_existing_users'] = False
return task_args
def _prepare_test_list(self, test_name):
+ """Build the list of test cases to be executed."""
test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
test_yaml_file_name)
if not os.path.exists(self.TEMP_DIR):
os.makedirs(self.TEMP_DIR)
- self.apply_blacklist(scenario_file_name, test_file_name)
+ self._apply_blacklist(scenario_file_name, test_file_name)
return test_file_name
@staticmethod
return False
for result in report.get('result'):
- if result is None or len(result.get('error')) > 0:
+ if result is None or result.get('error'):
return False
return True
- @staticmethod
- def live_migration_supported():
- """Determine if live migration is supported."""
- config = iniparse.ConfigParser()
- if (config.read(RallyBase.TEMPEST_CONF_FILE) and
- config.has_section('compute-feature-enabled') and
- config.has_option('compute-feature-enabled',
- 'live_migration')):
- return config.getboolean('compute-feature-enabled',
- 'live_migration')
+ def _migration_supported(self):
+ """Determine if migration is supported."""
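+        # migration scenarios need at least two active compute nodes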
+ if self.compute_cnt > 1:
+ return True
return False
def get_cmd_output(proc):
"""Get command stdout."""
result = ""
- while proc.poll() is None:
- line = proc.stdout.readline()
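+        # iterating the pipe reads every line up to EOF, unlike polling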
+ for line in proc.stdout:
result += line
return result
with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
- installer_type = CONST.__getattribute__('INSTALLER_TYPE')
- deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
+ installer_type = env.get('INSTALLER_TYPE')
+ deploy_scenario = env.get('DEPLOY_SCENARIO')
if (bool(installer_type) and bool(deploy_scenario) and
'scenario' in black_list_yaml.keys()):
for item in black_list_yaml['scenario']:
in_it(installer_type, installers)):
tests = item['tests']
black_tests.extend(tests)
- except Exception:
+ except Exception: # pylint: disable=broad-except
LOGGER.debug("Scenario exclusion not applied.")
return black_tests
# match if regex pattern is set and found in the needle
if pattern and re.search(pattern, needle) is not None:
return True
- else:
- return False
- @staticmethod
- def excl_func():
+ return False
+
+ def excl_func(self):
"""Exclude functionalities."""
black_tests = []
func_list = []
with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
black_list_yaml = yaml.safe_load(black_list_file)
- if not RallyBase.live_migration_supported():
- func_list.append("no_live_migration")
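+        # with a single compute node, migration scenarios cannot pass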
+ if not self._migration_supported():
+ func_list.append("no_migration")
if 'functionality' in black_list_yaml.keys():
for item in black_list_yaml['functionality']:
return black_tests
- @staticmethod
- def apply_blacklist(case_file_name, result_file_name):
+ def _apply_blacklist(self, case_file_name, result_file_name):
"""Apply blacklist."""
LOGGER.debug("Applying blacklist...")
cases_file = open(case_file_name, 'r')
result_file = open(result_file_name, 'w')
- black_tests = list(set(RallyBase.excl_func() +
- RallyBase.excl_scenario()))
+ black_tests = list(set(self.excl_func() +
+ self.excl_scenario()))
if black_tests:
LOGGER.debug("Blacklisted tests: " + str(black_tests))
LOGGER.info('No tests for scenario "%s"', test_name)
return
- cmd_line = ("rally task start --abort-on-sla-failure "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, self._build_task_args(test_name)))
- LOGGER.debug('running command line: %s', cmd_line)
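+        # passing an argument list lets subprocess run rally without a shell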
+ cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
+ task_file, "--task-args",
+ str(self._build_task_args(test_name))])
+ LOGGER.debug('running command: %s', cmd)
- proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
- output = self._get_output(proc, test_name)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = self.get_cmd_output(proc)
task_id = self.get_task_id(output)
+
LOGGER.debug('task_id : %s', task_id)
if task_id is None:
LOGGER.error('Failed to retrieve task_id, validating task...')
- cmd_line = ("rally task validate "
- "--task {0} "
- "--task-args \"{1}\""
- .format(task_file, self._build_task_args(test_name)))
- LOGGER.debug('running command line: %s', cmd_line)
- proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT, shell=True)
+ cmd = (["rally", "task", "validate", "--task", task_file,
+ "--task-args", str(self._build_task_args(test_name))])
+ LOGGER.debug('running command: %s', cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
output = self.get_cmd_output(proc)
LOGGER.error("Task validation result:" + "\n" + output)
return
self.RESULTS_DIR)
os.makedirs(self.RESULTS_DIR)
- # write html report file
- report_html_name = 'opnfv-{}.html'.format(test_name)
- report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
- cmd_line = "rally task report {} --out {}".format(task_id,
- report_html_dir)
-
- LOGGER.debug('running command line: %s', cmd_line)
- os.popen(cmd_line)
-
# get and save rally operation JSON result
- cmd_line = "rally task results %s" % task_id
- LOGGER.debug('running command line: %s', cmd_line)
- cmd = os.popen(cmd_line)
- json_results = cmd.read()
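+        # log the detailed per-scenario breakdown before fetching the raw JSON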
+ cmd = (["rally", "task", "detailed", task_id])
+ LOGGER.debug('running command: %s', cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ json_detailed = self.get_cmd_output(proc)
+ LOGGER.info('%s', json_detailed)
+
+ cmd = (["rally", "task", "results", task_id])
+ LOGGER.debug('running command: %s', cmd)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ json_results = self.get_cmd_output(proc)
+ self._append_summary(json_results, test_name)
report_json_name = 'opnfv-{}.json'.format(test_name)
report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
with open(report_json_dir, 'w') as r_file:
LOGGER.debug('saving json file')
r_file.write(json_results)
+ # write html report file
+ report_html_name = 'opnfv-{}.html'.format(test_name)
+ report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
+ cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
+ LOGGER.debug('running command: %s', cmd)
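+        # fire and forget: the HTML report is only written to disk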
+ subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
# parse JSON operation result
if self.task_succeed(json_results):
LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
else:
LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
- def _get_output(self, proc, test_name):
- result = ""
+ def _append_summary(self, json_raw, test_name):
+ """Update statistics summary info."""
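+        # aggregate counters from the rally JSON results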
nb_tests = 0
+ nb_success = 0
overall_duration = 0.0
- success = 0.0
- nb_totals = 0
-
- while proc.poll() is None:
- line = proc.stdout.readline()
- if ("Load duration" in line or
- "started" in line or
- "finished" in line or
- " Preparing" in line or
- "+-" in line or
- "|" in line):
- result += line
- elif "test scenario" in line:
- result += "\n" + line
- elif "Full duration" in line:
- result += line + "\n\n"
-
- # parse output for summary report
- if ("| " in line and
- "| action" not in line and
- "| Starting" not in line and
- "| Completed" not in line and
- "| ITER" not in line and
- "| " not in line and
- "| total" not in line):
- nb_tests += 1
- elif "| total" in line:
- percentage = ((line.split('|')[8]).strip(' ')).strip('%')
- try:
- success += float(percentage)
- except ValueError:
- LOGGER.info('Percentage error: %s, %s',
- percentage, line)
- nb_totals += 1
- elif "Full duration" in line:
- duration = line.split(': ')[1]
- try:
- overall_duration += float(duration)
- except ValueError:
- LOGGER.info('Duration error: %s, %s', duration, line)
-
- overall_duration = "{:10.2f}".format(overall_duration)
- if nb_totals == 0:
- success_avg = 0
- else:
- success_avg = "{:0.2f}".format(success / nb_totals)
+
+ rally_report = json.loads(json_raw)
+ for report in rally_report:
+ if report.get('full_duration'):
+ overall_duration += report.get('full_duration')
+
+ if report.get('result'):
+ for result in report.get('result'):
+ nb_tests += 1
+ if not result.get('error'):
+ nb_success += 1
scenario_summary = {'test_name': test_name,
'overall_duration': overall_duration,
'nb_tests': nb_tests,
- 'success': success_avg}
+ 'nb_success': nb_success}
self.summary.append(scenario_summary)
- LOGGER.debug("\n" + result)
-
- return result
-
def _prepare_env(self):
+ """Create resources needed by test scenarios."""
LOGGER.debug('Validating the test name...')
if self.test_name not in self.TESTS:
raise Exception("Test name '%s' is invalid" % self.test_name)
- image_name = self.GLANCE_IMAGE_NAME + self.guid
network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
router_name = self.RALLY_ROUTER_NAME + self.guid
+ self.image_name = self.GLANCE_IMAGE_NAME + self.guid
+ self.flavor_name = self.FLAVOR_NAME + self.guid
+ self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
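+        # the active compute count decides whether migration tests are blacklisted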
+ self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)
- LOGGER.debug('Getting or creating image...')
+ LOGGER.debug("Creating image '%s'...", self.image_name)
image_creator = deploy_utils.create_image(
- self.os_creds, ImageSettings(
- name=image_name,
+ self.os_creds, ImageConfig(
+ name=self.image_name,
image_file=self.GLANCE_IMAGE_PATH,
img_format=self.GLANCE_IMAGE_FORMAT,
image_user=self.GLANCE_IMAGE_USERNAME,
public=True,
extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
if image_creator is None:
- raise Exception("Failed to get or create image '%s'" %
- image_name)
+ raise Exception("Failed to create image")
self.creators.append(image_creator)
LOGGER.debug("Creating network '%s'...", network_name)
+
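+        # provider network attributes are optional and default to None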
+ rally_network_type = getattr(config.CONF, 'rally_network_type', None)
+ rally_physical_network = getattr(
+ config.CONF, 'rally_physical_network', None)
+ rally_segmentation_id = getattr(
+ config.CONF, 'rally_segmentation_id', None)
+
network_creator = deploy_utils.create_network(
- self.os_creds, NetworkSettings(
+ self.os_creds, NetworkConfig(
name=network_name,
shared=True,
- subnet_settings=[SubnetSettings(
+ network_type=rally_network_type,
+ physical_network=rally_physical_network,
+ segmentation_id=rally_segmentation_id,
+ subnet_settings=[SubnetConfig(
name=subnet_name,
- cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
- ]))
+ cidr=self.RALLY_PRIVATE_SUBNET_CIDR,
+ dns_nameservers=[env.get('NAMESERVER')])]))
if network_creator is None:
raise Exception("Failed to create private network")
self.priv_net_id = network_creator.get_network().id
LOGGER.debug("Creating router '%s'...", router_name)
router_creator = deploy_utils.create_router(
- self.os_creds, RouterSettings(
+ self.os_creds, RouterConfig(
name=router_name,
external_gateway=self.ext_net_name,
internal_subnets=[subnet_name]))
raise Exception("Failed to create router")
self.creators.append(router_creator)
+ LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
+ flavor_creator = OpenStackFlavor(
+ self.os_creds, FlavorConfig(
+ name=self.flavor_name, ram=self.FLAVOR_RAM, disk=1, vcpus=1,
+ metadata=self.FLAVOR_EXTRA_SPECS))
+ if flavor_creator is None or flavor_creator.create() is None:
+ raise Exception("Failed to create flavor")
+ self.creators.append(flavor_creator)
+
+ LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
+ flavor_alt_creator = OpenStackFlavor(
+ self.os_creds, FlavorConfig(
+ name=self.flavor_alt_name, ram=self.FLAVOR_RAM_ALT, disk=1,
+ vcpus=1, metadata=self.FLAVOR_EXTRA_SPECS))
+ if flavor_alt_creator is None or flavor_alt_creator.create() is None:
+ raise Exception("Failed to create flavor")
+ self.creators.append(flavor_alt_creator)
+
def _run_tests(self):
+ """Execute tests."""
if self.test_name == 'all':
for test in self.TESTS:
if test == 'all' or test == 'vm':
self._run_task(self.test_name)
def _generate_report(self):
- report = (
- "\n"
- " "
- "\n"
- " Rally Summary Report\n"
- "\n"
- "+===================+============+===============+===========+"
- "\n"
- "| Module | Duration | nb. Test Run | Success |"
- "\n"
- "+===================+============+===============+===========+"
- "\n")
+ """Generate test execution summary report."""
+ total_duration = 0.0
+ total_nb_tests = 0
+ total_nb_success = 0
payload = []
+ res_table = prettytable.PrettyTable(
+ padding_width=2,
+ field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
+ res_table.align['Module'] = "l"
+ res_table.align['Duration'] = "r"
+ res_table.align['Success'] = "r"
+
# for each scenario we draw a row for the table
- total_duration = 0.0
- total_nb_tests = 0
- total_success = 0.0
for item in self.summary:
- name = "{0:<17}".format(item['test_name'])
- duration = float(item['overall_duration'])
- total_duration += duration
- duration = time.strftime("%M:%S", time.gmtime(duration))
- duration = "{0:<10}".format(duration)
- nb_tests = "{0:<13}".format(item['nb_tests'])
- total_nb_tests += int(item['nb_tests'])
- success = "{0:<10}".format(str(item['success']) + '%')
- total_success += float(item['success'])
- report += ("" +
- "| " + name + " | " + duration + " | " +
- nb_tests + " | " + success + "|\n" +
- "+-------------------+------------"
- "+---------------+-----------+\n")
- payload.append({'module': name,
+ total_duration += item['overall_duration']
+ total_nb_tests += item['nb_tests']
+ total_nb_success += item['nb_success']
+ try:
+ success_avg = 100 * item['nb_success'] / item['nb_tests']
+ except ZeroDivisionError:
+ success_avg = 0
+            success_str = "{:0.2f}%".format(success_avg)
+ duration_str = time.strftime("%M:%S",
+ time.gmtime(item['overall_duration']))
+ res_table.add_row([item['test_name'], duration_str,
+ item['nb_tests'], success_str])
+ payload.append({'module': item['test_name'],
'details': {'duration': item['overall_duration'],
'nb tests': item['nb_tests'],
- 'success': item['success']}})
+ 'success': success_str}})
total_duration_str = time.strftime("%H:%M:%S",
time.gmtime(total_duration))
- total_duration_str2 = "{0:<10}".format(total_duration_str)
- total_nb_tests_str = "{0:<13}".format(total_nb_tests)
-
try:
- self.result = total_success / len(self.summary)
+ self.result = 100 * total_nb_success / total_nb_tests
except ZeroDivisionError:
self.result = 100
-
success_rate = "{:0.2f}".format(self.result)
- success_rate_str = "{0:<10}".format(str(success_rate) + '%')
- report += ("+===================+============"
- "+===============+===========+")
- report += "\n"
- report += ("| TOTAL: | " + total_duration_str2 + " | " +
- total_nb_tests_str + " | " + success_rate_str + "|\n")
- report += ("+===================+============"
- "+===============+===========+")
- report += "\n"
-
- LOGGER.info("\n" + report)
+        success_rate_str = success_rate + '%'
+ res_table.add_row(["", "", "", ""])
+ res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
+ success_rate_str])
+
+ LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
+ LOGGER.info("Rally '%s' success_rate is %s%%",
+ self.case_name, success_rate)
payload.append({'summary': {'duration': total_duration,
'nb tests': total_nb_tests,
'nb success': success_rate}})
-
self.details = payload
- LOGGER.info("Rally '%s' success_rate is %s%%",
- self.case_name, success_rate)
-
def _clean_up(self):
+ """Cleanup all OpenStack objects. Should be called on completion."""
for creator in reversed(self.creators):
try:
creator.clean()
- except Exception as e:
- LOGGER.error('Unexpected error cleaning - %s', e)
+ except Exception as exc: # pylint: disable=broad-except
+ LOGGER.error('Unexpected error cleaning - %s', exc)
@energy.enable_recording
def run(self, **kwargs):
"""Run testcase."""
self.start_time = time.time()
try:
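+            # rally requires a registered deployment before tasks can start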
+ conf_utils.create_rally_deployment()
self._prepare_env()
self._run_tests()
self._generate_report()