# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

from functest.utils.constants import CONST
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
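# NB: in "all" mode, main() runs every module above except 'vm'
# (see the dispatch logic at the bottom of this file).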

parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()
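
# Example invocations (illustrative):
#   python run_rally-cert.py authenticate
#   python run_rally-cert.py -z -r all   # sanity subset, push results to DB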

# In verbose mode merge rally's stderr into stdout; otherwise discard it.
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally-cert").getLogger()

RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
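# NB: despite its extension, blacklist.txt holds YAML (parsed with
# yaml.safe_load in excl_scenario() / excl_func() below).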

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
                                 'tempest/tempest.conf')

RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
RALLY_ROUTER_NAME = CONST.rally_router_name

GLANCE_IMAGE_NAME = CONST.openstack_image_name
GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
                                 GLANCE_IMAGE_FILENAME)
CINDER_VOLUME_TYPE_NAME = "volume_test"
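

# Shared runtime state, populated in main() and read by the helpers below.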
class GlobalVariables:
    SUMMARY = []
    nova_client = None
    neutron_client = None
    network_dict = {}


def get_task_id(cmd_raw):
    """
    get task id from the raw rally command output
    :return: task_id as a string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        match = taskid_re.match(line.strip())
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the raw JSON from rally results
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False
        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False
    return True


def live_migration_supported():
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')
    return False
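

# build_task_args() produces the dict that is serialized into rally's
# --task-args option; its keys are consumed as template variables by the
# scenario yaml files.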
def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    # sanity mode always runs the smoke variant of the scenarios
    if args.sanity:
        task_args['smoke'] = True
    else:
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = GlobalVariables.network_dict['net_id']
    task_args['netid'] = str(net_id)

    auth_url = CONST.OS_AUTH_URL
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args
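

# get_output() consumes rally's stdout while the task runs: it keeps the
# significant lines for logging and scrapes the ASCII result table to build
# the per-scenario summary appended to GlobalVariables.SUMMARY.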
def get_output(proc, test_name):
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            # keep only the significant lines in non-verbose mode
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for the summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    GlobalVariables.SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""
    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line
    return result
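

# Blacklist support. blacklist.txt is YAML shaped roughly as follows (the
# values below are placeholders, not real entries):
#   scenario:
#     - scenarios: [<deploy scenario names>]
#       installers: [<installer types>]
#       tests: [<rally test names>]
#   functionality:
#     - functions: [no_live_migration]
#       tests: [<rally test names>]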
def excl_scenario():
    black_tests = []
    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        installer_type = CONST.INSTALLER_TYPE
        deploy_scenario = CONST.DEPLOY_SCENARIO
        if installer_type and deploy_scenario:
            if 'scenario' in black_list_yaml.keys():
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    if (deploy_scenario in scenarios and
                            installer_type in installers):
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        logger.debug("Scenario exclusion not applied.")

    return black_tests


def excl_func():
    black_tests = []
    func_list = []
    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        if not live_migration_supported():
            func_list.append("no_live_migration")

        if 'functionality' in black_list_yaml.keys():
            for item in black_list_yaml['functionality']:
                functions = item['functions']
                for func in func_list:
                    if func in functions:
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        logger.debug("Functionality exclusion not applied.")

    return black_tests
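

# apply_blacklist() copies the scenario file, skipping each yaml block whose
# first line matches a blacklisted test, up to the next blank line.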
def apply_blacklist(case_file_name, result_file_name):
    logger.debug("Applying blacklist...")
    cases_file = open(case_file_name, 'r')
    result_file = open(result_file_name, 'w')
    black_tests = list(set(excl_func() + excl_scenario()))

    include = True
    for cases_line in cases_file:
        if include:
            for black_tests_line in black_tests:
                if re.search(black_tests_line,
                             cases_line.strip().rstrip(':')):
                    include = False
                    break
            else:  # no blacklist pattern matched: keep the line
                result_file.write(str(cases_line))
        elif cases_line.isspace():  # blank line ends the skipped block
            include = True

    cases_file.close()
    result_file.close()
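

# Scenario files are looked up in the top-level scenario dir first, then in
# the sanity/ or full/ subdirectory depending on the selected mode.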
def prepare_test_list(test_name):
    test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
    scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)

    if not os.path.exists(scenario_file_name):
        if args.sanity:
            scenario_file_name = os.path.join(SANITY_MODE_DIR,
                                              test_yaml_file_name)
        else:
            scenario_file_name = os.path.join(FULL_MODE_DIR,
                                              test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            logger.info("The scenario '%s' does not exist."
                        % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
    test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)
    return test_file_name


def file_is_empty(file_name):
    return os.stat(file_name).st_size == 0


def run_task(test_name):
    """
    Launch rally for one task: the "main" per-scenario entry point.
    :param test_name: name of the rally test scenario
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = os.path.join(RALLY_DIR, 'task.yaml')
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    file_name = prepare_test_list(test_name)
    if file_is_empty(file_name):
        logger.info('No tests for scenario "{}"'.format(test_name))
        return

    cmd_line = ("rally task start --abort-on-sla-failure "
                "--task {0} "
                "--task-args \"{1}\""
                .format(task_file, build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # create the result directory if it does not exist yet
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write the html report file
    report_html_name = 'opnfv-{}.html'.format(test_name)
    report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_html_dir)
    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    report_json_name = 'opnfv-{}.json'.format(test_name)
    report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
    with open(report_json_dir, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(report_json_dir) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = 'PASS'
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
        status = 'FAIL'

    # Push detailed results into the testcase payload
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest", "Rally_details",
                                    start_time, stop_time, status, json_data)


def main():
    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    GlobalVariables.SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    # 'all' runs every module except 'vm' (and 'all' itself)
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+\n"
              "| Module            | Duration   | nb. Test Run  | Success   |\n"
              "+===================+============+===============+===========+\n")
    payload = []

    stop_time = time.time()

    # for each scenario we draw a row of the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    if len(GlobalVariables.SUMMARY):
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)
    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest", case_name,
                                    start_time, stop_time, status, payload)

    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()