# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
""" tests configuration """
import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
# Names of the rally scenario modules this script knows how to run;
# 'all' expands to every other entry.
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']

parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

# In verbose mode rally's stderr is interleaved with stdout; otherwise it
# is silently discarded.
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()

HOME = os.environ['HOME'] + "/"
RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
    ft_utils.get_functest_config('general.directories.dir_rally')
SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
FULL_MODE_DIR = RALLY_DIR + "scenario/full"
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
TEMP_DIR = RALLY_DIR + "var"
BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"

FLAVOR_NAME = "m1.tiny"
# NOTE(review): the three values below were lost in this extract; restored
# from the upstream functest defaults -- confirm against the original file.
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = \
    ft_utils.get_functest_config('general.directories.dir_rally_res')
TEMPEST_CONF_FILE = \
    ft_utils.get_functest_config('general.directories.dir_results') + \
    '/tempest/tempest.conf'
TEST_DB = ft_utils.get_functest_config('results.test_db_url')

PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')

GLANCE_IMAGE_NAME = \
    ft_utils.get_functest_config('general.openstack.image_name')
GLANCE_IMAGE_FILENAME = \
    ft_utils.get_functest_config('general.openstack.image_file_name')
GLANCE_IMAGE_FORMAT = \
    ft_utils.get_functest_config('general.openstack.image_disk_format')
GLANCE_IMAGE_PATH = \
    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
    "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"

# Per-scenario summary dicts, appended by get_output() and read by main().
SUMMARY = []
# Shared neutron client, initialised in main() (declared 'global' there).
neutron_client = None
def get_task_id(cmd_raw):
    """
    Extract the rally task id from 'rally task start' console output.

    :param cmd_raw: raw console output of the rally command
    :return: task id as a string, or None when no 'Task ...: started'
             line is present (restored fallback lost in this extract)
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None
def task_succeed(json_raw):
    """
    Parse rally JSON results and decide overall success.

    :param json_raw: JSON string as produced by 'rally task results'
    :return: True when every scenario report exists and every iteration
             has an empty 'error' list, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        # A missing report or missing 'result' key means the run aborted.
        if report is None or report.get('result') is None:
            return False
        for result in report.get('result'):
            # Any iteration with a non-empty error list fails the task.
            if result is None or len(result.get('error')) > 0:
                return False
    return True
def live_migration_supported():
    """
    Return True when tempest.conf declares live-migration support.

    Reads TEMPEST_CONF_FILE with iniparse; returns the boolean value of
    [compute-feature-enabled] live_migration. The 'return False' fallback
    (file unreadable / section or option absent) was lost in this extract
    and is restored here.
    """
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False
def build_task_args(test_file_name):
    """
    Build the dict of --task-args handed to the rally scenario templates.

    :param test_file_name: scenario/service name (e.g. 'nova')
    :return: dict of template arguments
    """
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    # Sanity mode forces the lightweight 'smoke' variant of each scenario
    # (branch restored -- the if/else lines were lost in this extract).
    if args.sanity:
        task_args['smoke'] = True
    else:
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    # network_dict is a module global populated by main() before any task
    # is started -- TODO(review) confirm it always holds 'net_id' here.
    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)

    # Keystone endpoint without the port, used by the 'requests' scenario.
    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args
# NOTE(review): this extract is garbled -- each line still carries its
# original file line number and many interior lines are missing (variable
# initialisation, filter conditions, try/except wrappers, final return).
# Code text is left untouched; comments only.
#
# Purpose (from the visible code): reads rally console output from *proc*
# line by line, copies selected lines into a human-readable buffer,
# counts test rows, accumulates the '| total' success percentages and the
# 'Full duration' times, then appends a per-scenario summary dict to the
# module-level SUMMARY list and logs the filtered output.
200 def get_output(proc, test_name):
# (missing lines ~201-207: presumably result/nb_tests/success/nb_totals
# accumulator initialisation -- confirm against the original file)
204 overall_duration = 0.0
208 while proc.poll() is None:
209 line = proc.stdout.readline()
# Filter which raw lines are echoed into the result buffer.
213 if ("Load duration" in line or
215 "finished" in line or
216 " Preparing" in line or
220 elif "test scenario" in line:
221 result += "\n" + line
222 elif "Full duration" in line:
223 result += line + "\n\n"
225 # parse output for summary report
# Table rows that are not headers/progress markers count as executed tests.
227 "| action" not in line and
228 "| Starting" not in line and
229 "| Completed" not in line and
230 "| ITER" not in line and
232 "| total" not in line):
232# The '| total' row carries the success percentage in column 8.
234 elif "| total" in line:
235 percentage = ((line.split('|')[8]).strip(' ')).strip('%')
237 success += float(percentage)
# (missing except-branch lines around here -- float() parse failures are
# logged rather than raised)
239 logger.info('Percentage error: %s, %s' % (percentage, line))
241 elif "Full duration" in line:
242 duration = line.split(': ')[1]
244 overall_duration += float(duration)
246 logger.info('Duration error: %s, %s' % (duration, line))
248 overall_duration = "{:10.2f}".format(overall_duration)
# Average success over the number of '| total' rows seen.
252 success_avg = "{:0.2f}".format(success / nb_totals)
254 scenario_summary = {'test_name': test_name,
255 'overall_duration': overall_duration,
256 'nb_tests': nb_tests,
257 'success': success_avg}
258 SUMMARY.append(scenario_summary)
260 logger.debug("\n" + result)
def get_cmd_output(proc):
    """
    Collect the stdout of *proc* until the process terminates.

    :param proc: Popen-like object exposing poll() and stdout.readline()
    :return: accumulated output as a single string (the 'result'
             initialisation and return were lost in this extract and
             are restored here)
    """
    result = ""
    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line
    return result
def excl_scenario():
    """
    Return the list of test names blacklisted for the current deployment.

    NOTE(review): the def line and the try/except/return skeleton were
    lost in this extract; the function name is recovered from the call in
    apply_blacklist(), the skeleton from the visible 'Scenario exclusion
    not applied.' failure log -- confirm against the original file.

    Matches INSTALLER_TYPE / DEPLOY_SCENARIO environment variables against
    the 'scenario' section of BLACKLIST_FILE.
    """
    black_tests = []
    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        installer_type = os.getenv('INSTALLER_TYPE')
        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
        # Only apply the exclusion when both env variables are set.
        if installer_type and deploy_scenario:
            if 'scenario' in black_list_yaml.keys():
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    if (deploy_scenario in scenarios and
                            installer_type in installers):
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        # Best-effort: a missing/unreadable blacklist disables exclusion.
        logger.debug("Scenario exclusion not applied.")

    return black_tests
def excl_func():
    """
    Return the list of test names blacklisted by missing functionality.

    NOTE(review): the def line and the try/except/return skeleton were
    lost in this extract; the function name is recovered from the call in
    apply_blacklist() -- confirm against the original file.

    Currently the only detected functionality gap is live migration
    (via live_migration_supported()), matched against the
    'functionality' section of BLACKLIST_FILE.
    """
    black_tests = []
    func_list = []
    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        if not live_migration_supported():
            func_list.append("no_live_migration")

        if 'functionality' in black_list_yaml.keys():
            for item in black_list_yaml['functionality']:
                functions = item['functions']
                for func in func_list:
                    if func in functions:
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        # Best-effort: a missing/unreadable blacklist disables exclusion.
        logger.debug("Functionality exclusion not applied.")

    return black_tests
def apply_blacklist(case_file_name, result_file_name):
    """
    Copy a scenario file, dropping every blacklisted test block.

    NOTE(review): several loop-control lines were lost in this extract;
    the include-flag skeleton below is reconstructed from the visible
    write/isspace lines -- confirm against the original file.

    :param case_file_name: source rally scenario YAML
    :param result_file_name: destination YAML with blacklisted tests removed
    """
    logger.debug("Applying blacklist...")
    cases_file = open(case_file_name, 'r')
    result_file = open(result_file_name, 'w')

    black_tests = list(set(excl_func() + excl_scenario()))

    # Copy line by line; when a line matches a blacklisted test name, skip
    # until the next blank line (end of that YAML block).
    include = True
    for cases_line in cases_file:
        if include:
            for black_tests_line in black_tests:
                if re.search(black_tests_line,
                             cases_line.strip().rstrip(':')):
                    include = False
                    break
            else:
                result_file.write(str(cases_line))
        else:
            if cases_line.isspace():
                include = True

    cases_file.close()
    result_file.close()
def prepare_test_list(test_name):
    """
    Locate the scenario file for *test_name* and write a blacklisted copy.

    Looks first in RALLY_DIR/scenario, then in the sanity or full variant
    directory depending on --sanity (branch lines restored -- they were
    lost in this extract). Exits the script when no scenario file exists.

    :param test_name: scenario name (e.g. 'nova')
    :return: path of the filtered scenario file under TEMP_DIR
    """
    scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                  test_name)
    if not os.path.exists(scenario_file_name):
        if args.sanity:
            scenario_file_name = '{}opnfv-{}.yaml'.format(SANITY_MODE_DIR +
                                                          "/", test_name)
        else:
            scenario_file_name = '{}opnfv-{}.yaml'.format(FULL_MODE_DIR +
                                                          "/", test_name)
        if not os.path.exists(scenario_file_name):
            logger.info("The scenario '%s' does not exist."
                        % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
    test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)
    return test_file_name
def file_is_empty(file_name):
    """
    Return True when the file at *file_name* has zero size.

    :param file_name: path of an existing file (os.stat raises otherwise)
    :return: bool (the two return lines were lost in this extract and are
             restored here)
    """
    if os.stat(file_name).st_size > 0:
        return False
    return True
# NOTE(review): garbled extract -- original line numbers are fused into
# the text and several lines are missing (the 'task_id is None' guard,
# the report-generation popen, the else branch, and the trailing
# push_results_to_db arguments). Code text untouched; comments only.
#
# Purpose (from the visible code): runs one rally scenario end to end --
# builds the filtered test list, launches 'rally task start' via a shell
# Popen, extracts the task id, writes HTML/JSON reports under RESULTS_DIR,
# logs pass/fail from task_succeed(), and pushes detailed results to the
# test DB.
382 def run_task(test_name):
384 # the "main" function of the script who launch rally for a task
385 # :param test_name: name for the rally test
389 logger.info('Starting test scenario "{}" ...'.format(test_name))
390 start_time = time.time()
392 task_file = '{}task.yaml'.format(RALLY_DIR)
393 if not os.path.exists(task_file):
394 logger.error("Task file '%s' does not exist." % task_file)
397 file_name = prepare_test_list(test_name)
398 if file_is_empty(file_name):
399 logger.info('No tests for scenario "{}"'.format(test_name))
402 cmd_line = ("rally task start --abort-on-sla-failure " +
403 "--task {} ".format(task_file) +
404 "--task-args \"{}\" ".format(build_task_args(test_name)))
405 logger.debug('running command line : {}'.format(cmd_line))
# shell=True with a script-built command line; inputs come from local
# config, not untrusted users, but worth noting in review.
407 p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
408 stderr=RALLY_STDERR, shell=True)
409 output = get_output(p, test_name)
410 task_id = get_task_id(output)
411 logger.debug('task_id : {}'.format(task_id))
# (missing guard around here: presumably 'if task_id is None:' before the
# validation fallback below -- confirm against the original file)
414 logger.error('Failed to retrieve task_id, validating task...')
415 cmd_line = ("rally task validate " +
416 "--task {} ".format(task_file) +
417 "--task-args \"{}\" ".format(build_task_args(test_name)))
418 logger.debug('running command line : {}'.format(cmd_line))
419 p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
420 stderr=subprocess.STDOUT, shell=True)
421 output = get_cmd_output(p)
422 logger.error("Task validation result:" + "\n" + output)
425 # check for result directory and create it otherwise
426 if not os.path.exists(RESULTS_DIR):
427 logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
428 os.makedirs(RESULTS_DIR)
430 # write html report file
431 report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
432 cmd_line = "rally task report {} --out {}".format(task_id,
435 logger.debug('running command line : {}'.format(cmd_line))
438 # get and save rally operation JSON result
439 cmd_line = "rally task results %s" % task_id
440 logger.debug('running command line : {}'.format(cmd_line))
441 cmd = os.popen(cmd_line)
442 json_results = cmd.read()
443 with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
444 logger.debug('saving json file')
445 f.write(json_results)
447 with open('{}opnfv-{}.json'
448 .format(RESULTS_DIR, test_name)) as json_file:
449 json_data = json.load(json_file)
451 """ parse JSON operation result """
453 if task_succeed(json_results):
454 logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
457 logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
459 # Push results in payload of testcase
# (missing 'if args.report:' guard presumably wraps the DB push below)
461 stop_time = time.time()
462 logger.debug("Push Rally detailed results into DB")
463 ft_utils.push_results_to_db("functest",
# NOTE(review): this is the interior of main() -- the 'def main():' line
# and many interior lines are outside/missing from this extract, and each
# remaining line carries its original file line number. Code text
# untouched; comments only.
#
# Purpose (from the visible code): sets up OpenStack clients, volume type,
# image and shared network; runs one or all rally scenarios via run_task();
# renders an ASCII summary table from SUMMARY; evaluates the success
# criterion; pushes the summary to the test DB; and cleans up the created
# image and volume type unless --noclean was given.
474 global neutron_client
476 nova_client = os_utils.get_nova_client()
477 neutron_client = os_utils.get_neutron_client()
478 cinder_client = os_utils.get_cinder_client()
480 start_time = time.time()
483 if not (args.test_name in tests):
484 logger.error('argument not valid')
# (missing: exit after the invalid-argument error -- confirm)
489 volume_types = os_utils.list_volume_types(cinder_client,
492 volume_type = os_utils.create_volume_type(
493 cinder_client, CINDER_VOLUME_TYPE_NAME)
495 logger.error("Failed to create volume type...")
498 logger.debug("Volume type '%s' created succesfully..."
499 % CINDER_VOLUME_TYPE_NAME)
501 logger.debug("Using existing volume type(s)...")
503 image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
509 logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
# network_dict is read by build_task_args(); presumably declared global
# in the missing part of this function -- confirm.
510 network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
517 if args.test_name == "all":
518 for test_name in tests:
519 if not (test_name == 'all' or
523 logger.debug("Test name: " + args.test_name)
524 run_task(args.test_name)
# Build the ASCII summary table from the SUMMARY list.
529 " Rally Summary Report\n"
531 "+===================+============+===============+===========+"
533 "| Module | Duration | nb. Test Run | Success |"
535 "+===================+============+===============+===========+"
538 stop_time = time.time()
540 # for each scenario we draw a row for the table
545 name = "{0:<17}".format(s['test_name'])
546 duration = float(s['overall_duration'])
547 total_duration += duration
548 duration = time.strftime("%M:%S", time.gmtime(duration))
549 duration = "{0:<10}".format(duration)
550 nb_tests = "{0:<13}".format(s['nb_tests'])
551 total_nb_tests += int(s['nb_tests'])
552 success = "{0:<10}".format(str(s['success']) + '%')
553 total_success += float(s['success'])
555 "| " + name + " | " + duration + " | " +
556 nb_tests + " | " + success + "|\n" +
557 "+-------------------+------------"
558 "+---------------+-----------+\n")
559 payload.append({'module': name,
560 'details': {'duration': s['overall_duration'],
561 'nb tests': s['nb_tests'],
562 'success': s['success']}})
564 total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
565 total_duration_str2 = "{0:<10}".format(total_duration_str)
566 total_nb_tests_str = "{0:<13}".format(total_nb_tests)
# Average success rate over all scenarios (division by len(SUMMARY);
# a guard for empty SUMMARY is presumably in the missing lines).
569 success_rate = total_success / len(SUMMARY)
572 success_rate = "{:0.2f}".format(success_rate)
573 success_rate_str = "{0:<10}".format(str(success_rate) + '%')
574 report += "+===================+============+===============+===========+"
576 report += ("| TOTAL: | " + total_duration_str2 + " | " +
577 total_nb_tests_str + " | " + success_rate_str + "|\n")
578 report += "+===================+============+===============+===========+"
581 logger.info("\n" + report)
582 payload.append({'summary': {'duration': total_duration,
583 'nb tests': total_nb_tests,
584 'nb success': success_rate}})
# Case name depends on --sanity (the surrounding if/else is missing).
587 case_name = "rally_sanity"
589 case_name = "rally_full"
591 # Evaluation of the success criteria
592 status = ft_utils.check_success_rate(case_name, success_rate)
599 logger.debug("Pushing Rally summary into DB...")
600 ft_utils.push_results_to_db("functest",
# Cleanup (presumably guarded by 'if not image_exists' / '--noclean' in
# the missing lines): delete the created image and volume type.
610 logger.debug("Deleting image '%s' with ID '%s'..."
611 % (GLANCE_IMAGE_NAME, image_id))
612 if not os_utils.delete_glance_image(nova_client, image_id):
613 logger.error("Error deleting the glance image")
616 logger.debug("Deleting volume type '%s'..."
617 % CINDER_VOLUME_TYPE_NAME)
618 if not os_utils.delete_volume_type(cinder_client, volume_type):
619 logger.error("Error in deleting volume type...")
624 if __name__ == '__main__':