3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
26 from novaclient import client as novaclient
27 from glanceclient import client as glanceclient
28 from keystoneclient.v2_0 import client as keystoneclient
29 from neutronclient.v2_0 import client as neutronclient
30 from cinderclient import client as cinderclient
32 import functest.utils.functest_logger as ft_logger
33 import functest.utils.functest_utils as functest_utils
34 import functest.utils.openstack_utils as openstack_utils
# ---------------------------------------------------------------------------
# NOTE(review): this file is a gapped excerpt of a larger script. The integer
# prefixing each line is its line number in the original file; gaps in that
# numbering mark elided lines (argparse `action=`/closing parens, blank
# lines, and several constants are not visible here). Do not treat missing
# guards/returns as absent from the real file.
# ---------------------------------------------------------------------------
# Names of the rally scenario modules this script can launch; "all" runs
# every entry of the list (see the dispatch near the bottom of the file).
36 """ tests configuration """
37 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
38 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
# Command-line interface: positional scenario name plus behaviour switches.
39 parser = argparse.ArgumentParser()
40 parser.add_argument("test_name",
41 help="Module name to be tested. "
42 "Possible values are : "
43 "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
44 "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
47 "performs all possible test scenarios"
50 parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
51 parser.add_argument("-r", "--report",
52 help="Create json result file",
54 parser.add_argument("-s", "--smoke",
55 help="Smoke test mode",
57 parser.add_argument("-v", "--verbose",
58 help="Print verbose info about the progress",
60 parser.add_argument("-n", "--noclean",
61 help="Don't clean the created resources for this test.",
63 parser.add_argument("-z", "--sanity",
64 help="Sanity test mode, execute only a subset of tests",
67 args = parser.parse_args()
# Where rally's stderr goes: merged into our stdout pipe, or discarded.
# NOTE(review): the conditional selecting between these two assignments
# (original lines 68-75) is elided — presumably keyed on args.verbose;
# confirm against the full file.
72 RALLY_STDERR = subprocess.STDOUT
74 RALLY_STDERR = open(os.devnull, 'w')
76 """ logging configuration """
77 logger = ft_logger.Logger("run_rally").getLogger()
# Functest repository checkout, resolved from the CI environment.
79 REPO_PATH = os.environ['repos_dir'] + '/functest/'
80 if not os.path.exists(REPO_PATH):
81 logger.error("Functest repository directory not found '%s'" % REPO_PATH)
# Global functest configuration (directories, openstack names, DB URL).
85 with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
86 functest_yaml = yaml.safe_load(f)
# Derived paths and tunables.
# NOTE(review): constants referenced later (USERS_AMOUNT, TENANTS_AMOUNT,
# CONCURRENCY, SUMMARY, and the tail of the RESULTS_DIR expression at
# original line 102) live in elided lines of this section.
89 HOME = os.environ['HOME'] + "/"
90 SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get(
91 "directories").get("dir_rally_scn")
92 TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
93 SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
95 FLAVOR_NAME = "m1.tiny"
98 ITERATIONS_AMOUNT = 10
101 RESULTS_DIR = functest_yaml.get("general").get("directories").get(
103 TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
104 "dir_results") + '/tempest/tempest.conf'
105 TEST_DB = functest_yaml.get("results").get("test_db_url")
106 PRIVATE_NETWORK = functest_yaml.get("general").get("openstack").get(
107 "neutron_private_net_name")
109 GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
111 GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
113 GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
115 GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
116 "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
# Volume type created for (and deleted after) the cinder scenarios.
118 CINDER_VOLUME_TYPE_NAME = "volume_test"
# Build one functest result record and POST it to the results collector at
# TEST_DB + "/results".
#   case:     case name, e.g. "Rally" or "Rally_details" (see callers below)
#   payload:  JSON-serializable blob sent under the "details" key
#   criteria: pass/fail verdict string sent under the "criteria" key
# Installer/scenario/version/pod metadata is resolved from the environment
# via functest_utils helpers.
# NOTE(review): gapped excerpt — handling of the POST response (original
# line 141 onward) is elided here.
124 def push_results_to_db(case, payload, criteria):
126 url = TEST_DB + "/results"
127 installer = functest_utils.get_installer_type(logger)
128 scenario = functest_utils.get_scenario(logger)
129 version = functest_utils.get_version(logger)
130 pod_name = functest_utils.get_pod_name(logger)
132 # evaluate success criteria
134 params = {"project_name": "functest", "case_name": case,
135 "pod_name": pod_name, "installer": installer,
136 "version": version, "scenario": scenario,
137 "criteria": criteria, "details": payload}
139 headers = {'Content-Type': 'application/json'}
140 r = requests.post(url, data=json.dumps(params), headers=headers)
# Extract the rally task id from the textual output of `rally task start`
# by scanning for a "Task <id>: started" line.
# NOTE(review): gapped excerpt — the guard between match and return
# (original line 154, presumably `if match:`) is elided, as is whatever is
# returned when no line matches; confirm against the full file before
# changing control flow here.
144 def get_task_id(cmd_raw):
146 get task id from command rally result
148 :return: task_id as string
# Anchored to the whole line; group(1) captures the id text.
150 taskid_re = re.compile('^Task +(.*): started$')
151 for line in cmd_raw.splitlines(True):
153 match = taskid_re.match(line)
155 return match.group(1)
# Decide whether a rally task succeeded from its raw JSON results string:
# every report must be non-None and carry a 'result' list, and every result
# entry must be non-None with an empty 'error' list.
# NOTE(review): gapped excerpt — the failure returns under each `if`
# (original lines 168-169, 172-175) and the final success return are elided;
# presumably `return False` / `return True`, confirm against the full file.
159 def task_succeed(json_raw):
161 Parse JSON from rally JSON results
165 rally_report = json.loads(json_raw)
166 for report in rally_report:
# A missing or empty report means the run did not produce results.
167 if report is None or report.get('result') is None:
170 for result in report.get('result'):
# Any recorded error on any iteration marks the task as failed.
171 if result is None or len(result.get('error')) > 0:
# Report whether the deployment advertises live migration, by reading the
# previously generated tempest.conf (compute-feature-enabled section,
# live_migration option). config.read() returns the list of files parsed,
# so a missing file short-circuits the chain.
# NOTE(review): gapped excerpt — the fallback return for a missing
# file/section/option (original lines 183-185) is elided; presumably
# `return False`.
177 def live_migration_supported():
178 config = iniparse.ConfigParser()
179 if (config.read(TEMPEST_CONF_FILE) and
180 config.has_section('compute-feature-enabled') and
181 config.has_option('compute-feature-enabled', 'live_migration')):
182 return config.getboolean('compute-feature-enabled', 'live_migration')
# Assemble the dict passed as `--task-args` to `rally task start` for one
# scenario: image/flavor names, template and support directories, load
# factors (users/tenants/iterations/concurrency), smoke vs. full mode,
# floating network name and private network id, live-migration capability.
# NOTE(review): gapped excerpt — the conditionals choosing between the
# smoke and full branches (original lines 198-202, presumably keyed on
# args.sanity/args.smoke), the ext_net presence check (207-211), the second
# argument of get_network_id (213, presumably PRIVATE_NETWORK) and the final
# `return task_args` (216) are elided.
187 def build_task_args(test_file_name):
188 task_args = {'service_list': [test_file_name]}
189 task_args['image_name'] = GLANCE_IMAGE_NAME
190 task_args['flavor_name'] = FLAVOR_NAME
191 task_args['glance_image_location'] = GLANCE_IMAGE_PATH
192 task_args['tmpl_dir'] = TEMPLATE_DIR
193 task_args['sup_dir'] = SUPPORT_DIR
194 task_args['users_amount'] = USERS_AMOUNT
195 task_args['tenants_amount'] = TENANTS_AMOUNT
196 task_args['iterations'] = ITERATIONS_AMOUNT
197 task_args['concurrency'] = CONCURRENCY
# Reduced-scope run: skip the full scenario set.
200 task_args['full_mode'] = False
201 task_args['smoke'] = True
# Full run: smoke behaviour follows the CLI flag.
203 task_args['full_mode'] = True
204 task_args['smoke'] = args.smoke
206 ext_net = openstack_utils.get_external_net(client_dict['neutron'])
208 task_args['floating_network'] = str(ext_net)
# No external network found: pass an empty name.
210 task_args['floating_network'] = ''
212 net_id = openstack_utils.get_network_id(client_dict['neutron'],
214 task_args['netid'] = str(net_id)
215 task_args['live_migration'] = live_migration_supported()
# Stream the stdout of a running `rally task ...` subprocess line by line,
# filter/echo it, extract the summary table, the per-scenario success
# percentage (field 8 of the "| total" row) and the "Full duration" value,
# and append a per-scenario entry to the module-level SUMMARY list.
# Returns the accumulated raw output, later fed to get_task_id().
# NOTE(review): heavily gapped excerpt — the initialisations of result /
# nb_tests / success / nb_totals, the verbose echo logic, several elif
# bodies, the try/except pairs around the float conversions (the paired
# "error" log lines below are their except branches), and the final
# `return result` are all elided; treat the control flow shown here as
# partial.
220 def get_output(proc, test_name):
224 overall_duration = 0.0
228 while proc.poll() is None:
229 line = proc.stdout.readline()
# Progress/noise lines from rally's console output.
233 if ("Load duration" in line or
235 "finished" in line or
236 " Preparing" in line or
240 elif "test scenario" in line:
241 result += "\n" + line
242 elif "Full duration" in line:
243 result += line + "\n\n"
245 # parse output for summary report
# Keep only genuine summary-table rows (exclude per-action/ITER rows).
247 "| action" not in line and
248 "| Starting" not in line and
249 "| Completed" not in line and
250 "| ITER" not in line and
252 "| total" not in line):
# "| total" row: field 8 carries the success percentage, e.g. "100.0%".
254 elif "| total" in line:
255 percentage = ((line.split('|')[8]).strip(' ')).strip('%')
257 success += float(percentage)
# Except branch of the elided try around the float conversion above.
259 logger.info('Percentage error: %s, %s' % (percentage, line))
261 elif "Full duration" in line:
262 duration = line.split(': ')[1]
264 overall_duration += float(duration)
# Except branch of the elided try around the float conversion above.
266 logger.info('Duration error: %s, %s' % (duration, line))
268 overall_duration = "{:10.2f}".format(overall_duration)
# Average success over the number of "| total" rows seen.
272 success_avg = "{:0.2f}".format(success / nb_totals)
274 scenario_summary = {'test_name': test_name,
275 'overall_duration': overall_duration,
276 'nb_tests': nb_tests,
277 'success': success_avg}
278 SUMMARY.append(scenario_summary)
280 logger.info("\n" + result)
# Drain the stdout of a subprocess into a single string (used for the
# `rally task validate` fallback in run_task).
# NOTE(review): gapped excerpt — the accumulator initialisation, the
# per-line append and the `return result` (original lines 286-287, 290-293)
# are elided here.
285 def get_cmd_output(proc):
288 while proc.poll() is None:
289 line = proc.stdout.readline()
# Run one rally scenario end to end:
#   1. check that the task file and the opnfv-<name>.yaml scenario exist;
#   2. launch `rally task start --abort-on-sla-failure` with the task args
#      from build_task_args() and parse its output via get_output();
#   3. if no task id could be parsed, re-run as `rally task validate` to
#      surface the error;
#   4. write the HTML and JSON reports under RESULTS_DIR;
#   5. push the parsed JSON payload to the results DB.
# NOTE(review): gapped excerpt — the early exits after the two existence
# checks, the `if task_id is None:` guard (original ~line 328) around the
# validation fallback, the execution of the report command, the tail of the
# report cmd_line (line 348-349) and the status computation feeding
# push_results_to_db are elided.
295 def run_task(test_name):
297 # the "main" function of the script who launch rally for a task
298 # :param test_name: name for the rally test
302 logger.info('Starting test scenario "{}" ...'.format(test_name))
304 task_file = '{}task.yaml'.format(SCENARIOS_DIR)
305 if not os.path.exists(task_file):
306 logger.error("Task file '%s' does not exist." % task_file)
309 test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
311 if not os.path.exists(test_file_name):
312 logger.error("The scenario '%s' does not exist." % test_file_name)
315 logger.debug('Scenario fetched from : {}'.format(test_file_name))
# Launch rally; stderr routed per the module-level RALLY_STDERR setting.
317 cmd_line = ("rally task start --abort-on-sla-failure " +
318 "--task {} ".format(task_file) +
319 "--task-args \"{}\" ".format(build_task_args(test_name)))
320 logger.debug('running command line : {}'.format(cmd_line))
322 p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
323 stderr=RALLY_STDERR, shell=True)
324 output = get_output(p, test_name)
325 task_id = get_task_id(output)
326 logger.debug('task_id : {}'.format(task_id))
# Fallback path (guard elided): validate the task to expose why no task id
# was produced.
329 logger.error('Failed to retrieve task_id, validating task...')
330 cmd_line = ("rally task validate " +
331 "--task {} ".format(task_file) +
332 "--task-args \"{}\" ".format(build_task_args(test_name)))
333 logger.debug('running command line : {}'.format(cmd_line))
334 p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
335 stderr=subprocess.STDOUT, shell=True)
336 output = get_cmd_output(p)
337 logger.error("Task validation result:" + "\n" + output)
340 # check for result directory and create it otherwise
341 if not os.path.exists(RESULTS_DIR):
# NOTE(review): '%s' with .format() leaves the literal '%s' in the log
# message — latent bug in the original, left untouched in this excerpt.
342 logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR))
343 os.makedirs(RESULTS_DIR)
345 # write html report file
346 report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
347 cmd_line = "rally task report {} --out {}".format(task_id,
350 logger.debug('running command line : {}'.format(cmd_line))
353 # get and save rally operation JSON result
354 cmd_line = "rally task results %s" % task_id
355 logger.debug('running command line : {}'.format(cmd_line))
356 cmd = os.popen(cmd_line)
357 json_results = cmd.read()
358 with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
359 logger.debug('saving json file')
360 f.write(json_results)
362 with open('{}opnfv-{}.json'
363 .format(RESULTS_DIR, test_name)) as json_file:
364 json_data = json.load(json_file)
366 """ parse JSON operation result """
368 if task_succeed(json_results):
369 logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
372 logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
374 # Push results in payload of testcase
# status is computed in elided lines from the task_succeed() branch above.
376 logger.debug("Push result into DB")
377 push_results_to_db("Rally_details", json_data, status)
# ---------------------------------------------------------------------------
# Main driver.
# NOTE(review): gapped excerpt — the enclosing function header (presumably
# `def main():` around original line 381) is elided, as are many guards,
# early exits and blank lines. Phases visible below: argument validation,
# OpenStack client construction, cinder volume-type and glance image
# fixtures, scenario dispatch, summary-table rendering, success-criteria
# evaluation (>= 90%), DB push, and fixture cleanup.
# ---------------------------------------------------------------------------
383 if not (args.test_name in tests):
384 logger.error('argument not valid')
# Build one client per OpenStack service from environment credentials.
388 creds_nova = openstack_utils.get_credentials("nova")
389 nova_client = novaclient.Client('2', **creds_nova)
390 creds_neutron = openstack_utils.get_credentials("neutron")
391 neutron_client = neutronclient.Client(**creds_neutron)
392 creds_keystone = openstack_utils.get_credentials("keystone")
393 keystone_client = keystoneclient.Client(**creds_keystone)
394 glance_endpoint = keystone_client.service_catalog.url_for(
395 service_type='image', endpoint_type='publicURL')
396 glance_client = glanceclient.Client(1, glance_endpoint,
397 token=keystone_client.auth_token)
398 creds_cinder = openstack_utils.get_credentials("cinder")
399 cinder_client = cinderclient.Client('2', creds_cinder['username'],
400 creds_cinder['api_key'],
401 creds_cinder['project_id'],
402 creds_cinder['auth_url'],
403 service_type="volume")
# client_dict is consumed by build_task_args() for network lookups.
405 client_dict['neutron'] = neutron_client
# Fixture: ensure a volume type exists for the cinder scenarios.
407 volume_types = openstack_utils.list_volume_types(cinder_client,
410 volume_type = openstack_utils.create_volume_type(
411 cinder_client, CINDER_VOLUME_TYPE_NAME)
413 logger.error("Failed to create volume type...")
416 logger.debug("Volume type '%s' created succesfully..."
417 % CINDER_VOLUME_TYPE_NAME)
419 logger.debug("Using existing volume type(s)...")
# Fixture: ensure the glance image used by the scenarios exists.
421 image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
425 logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
427 image_id = openstack_utils.create_glance_image(glance_client,
431 logger.error("Failed to create the Glance image...")
434 logger.debug("Image '%s' with ID '%s' created succesfully ."
435 % (GLANCE_IMAGE_NAME, image_id))
437 logger.debug("Using existing image '%s' with ID '%s'..."
438 % (GLANCE_IMAGE_NAME, image_id))
# Dispatch: "all" iterates every scenario (the filter excluding 'all'
# itself continues on elided lines); otherwise run the single named one.
441 if args.test_name == "all":
442 for test_name in tests:
443 if not (test_name == 'all' or
447 logger.debug("Test name: " + args.test_name)
448 run_task(args.test_name)
# Render the ASCII summary table from the SUMMARY entries collected by
# get_output().
453 " Rally Summary Report\n"
455 "+===================+============+===============+===========+"
457 "| Module | Duration | nb. Test Run | Success |"
459 "+===================+============+===============+===========+"
463 # for each scenario we draw a row for the table
468 name = "{0:<17}".format(s['test_name'])
469 duration = float(s['overall_duration'])
470 total_duration += duration
471 duration = time.strftime("%M:%S", time.gmtime(duration))
472 duration = "{0:<10}".format(duration)
473 nb_tests = "{0:<13}".format(s['nb_tests'])
474 total_nb_tests += int(s['nb_tests'])
475 success = "{0:<10}".format(str(s['success']) + '%')
476 total_success += float(s['success'])
478 "| " + name + " | " + duration + " | " +
479 nb_tests + " | " + success + "|\n" +
480 "+-------------------+------------"
481 "+---------------+-----------+\n")
482 payload.append({'module': name,
483 'details': {'duration': s['overall_duration'],
484 'nb tests': s['nb_tests'],
485 'success': s['success']}})
487 total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
488 total_duration_str2 = "{0:<10}".format(total_duration_str)
489 total_nb_tests_str = "{0:<13}".format(total_nb_tests)
# Overall success = average of per-scenario success percentages.
490 total_success = "{:0.2f}".format(total_success / len(SUMMARY))
491 total_success_str = "{0:<10}".format(str(total_success) + '%')
492 report += "+===================+============+===============+===========+"
494 report += ("| TOTAL: | " + total_duration_str2 + " | " +
495 total_nb_tests_str + " | " + total_success_str + "|\n")
496 report += "+===================+============+===============+===========+"
499 logger.info("\n" + report)
500 payload.append({'summary': {'duration': total_duration,
501 'nb tests': total_nb_tests,
502 'nb success': total_success}})
504 # Generate json results for DB
505 # json_results = {"timestart": time_start, "duration": total_duration,
506 # "tests": int(total_nb_tests),
507 # "success": int(total_success)}
508 # logger.info("Results: "+str(json_results))
510 # Evaluation of the success criteria
512 # for Rally we decided that the overall success rate must be above 90%
# NOTE(review): total_success was re-bound to a formatted *string* at
# original line 490; the comparison below compares str to int in the
# original — confirm against the full file (status branches are elided).
513 if total_success >= 90:
517 logger.debug("Pushing Rally summary into DB...")
518 push_results_to_db("Rally", payload, status)
# Cleanup of the fixtures created above (guards, presumably on
# args.noclean and the created/existing distinction, are elided).
524 logger.debug("Deleting image '%s' with ID '%s'..."
525 % (GLANCE_IMAGE_NAME, image_id))
526 if not openstack_utils.delete_glance_image(nova_client, image_id):
527 logger.error("Error deleting the glance image")
530 logger.debug("Deleting volume type '%s'..."
531 % CINDER_VOLUME_TYPE_NAME)
532 if not openstack_utils.delete_volume_type(cinder_client, volume_type):
533 logger.error("Error in deleting volume type...")
# Script entry point; the guarded call (presumably main()) is elided.
536 if __name__ == '__main__':