#!/usr/bin/python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB

import argparse
import iniparse
import json
import os
import re
import requests
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils
36 """ tests configuration """
37 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
38 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
39 parser = argparse.ArgumentParser()
40 parser.add_argument("test_name",
41 help="Module name to be tested. "
42 "Possible values are : "
43 "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
44 "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
47 "performs all possible test scenarios"

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

client_dict = {}
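
# Example invocation (illustrative; the actual script name and flags depend
# on the deployment):
#     python run_rally.py nova --smoke --report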

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')
77 """ logging configuration """
78 logger = ft_logger.Logger("run_rally").getLogger()
80 REPO_PATH = os.environ['repos_dir'] + '/functest/'
81 if not os.path.exists(REPO_PATH):
82 logger.error("Functest repository directory not found '%s'" % REPO_PATH)

with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"

SUMMARY = []


def push_results_to_db(case, payload, criteria):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    pod_name = functest_utils.get_pod_name(logger)

    # evaluate success criteria

    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": version, "scenario": scenario,
              "criteria": criteria, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)
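    # The POSTed body then looks roughly like this (illustrative values):
    # {"project_name": "functest", "case_name": "Rally", "installer": "fuel",
    #  "criteria": "passed", "details": [...], ...}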


def get_task_id(cmd_raw):
    """
    get task id from command rally result
    :param cmd_raw: raw command output as string
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None
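
# Note: rally prints a line of the form "Task <task-uuid>: started" when a
# task starts; the regex above captures the <task-uuid> part of that line.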


def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results
    :param json_raw: raw JSON as string
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True
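
# For reference, "rally task results" returns JSON shaped roughly like
# (illustrative, heavily trimmed):
#   [{"result": [{"error": [], "duration": 1.23, ...}, ...], ...}]
# so a scenario is considered successful only if every result entry carries
# an empty "error" list.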


def live_migration_supported():
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False
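
# live_migration_supported() reads the tempest.conf generated for this run;
# the section it checks looks like:
#   [compute-feature-enabled]
#   live_migration = false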


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args
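
# build_task_args('nova') returns a dict that rally renders into the task
# file, roughly (illustrative values):
#   {'service_list': ['nova'], 'image_name': 'functest-img',
#    'flavor_name': 'm1.tiny', 'smoke': True, 'netid': '<uuid>', ...}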


def get_output(proc, test_name):
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
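        # A rally "total" row looks roughly like this (illustrative):
        # | total | 0.8 | 1.0 | 1.3 | 1.4 | 1.5 | 1.1 | 100.0% | 10 |
        # so splitting on '|' leaves the success percentage in field 8.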
250 "| action" not in line and
251 "| Starting" not in line and
252 "| Completed" not in line and
253 "| ITER" not in line and
255 "| total" not in line):
257 elif "| total" in line:
258 percentage = ((line.split('|')[8]).strip(' ')).strip('%')
260 success += float(percentage)
262 logger.info('Percentage error: %s, %s' % (percentage, line))
264 elif "Full duration" in line:
265 duration = line.split(': ')[1]
267 overall_duration += float(duration)
269 logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    #
    # the "main" function of the script, which launches rally for a task
    # :param test_name: name for the rally test
    # :return: void
    #

    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data, status)


def main():
    global network_dict

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")
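    # Note (assumption about the client library): unlike the clients above,
    # cinderclient v2 takes its credentials positionally:
    # Client(version, username, api_key, project_id, auth_url, ...)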

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)
476 " Rally Summary Report\n"
478 "+===================+============+===============+===========+"
480 "| Module | Duration | nb. Test Run | Success |"
482 "+===================+============+===============+===========+"
486 # for each scenario we draw a row for the table
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})
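
    # Each loop iteration above renders one table row, e.g. (illustrative):
    # | nova              | 02:13      | 14            | 100.00%   |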

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average as a float: the success criteria below compares it
    # to 90, which would misbehave with a formatted string
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format(
        "{:0.2f}".format(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests),
    #                 "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload, status)

    if args.noclean:
        exit(0)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not openstack_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client,
                                                  volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()