3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
16 """ tests configuration """
25 import functest.utils.functest_logger as ft_logger
26 import functest.utils.functest_utils as functest_utils
27 import functest.utils.openstack_utils as os_utils
29 from functest.utils.functest_utils import FUNCTEST_REPO as REPO_PATH
# Rally scenario modules that can be launched individually; 'all' runs
# every module of this list in sequence.
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
# Command-line interface: one positional scenario name plus behaviour flags.
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "performs all possible test scenarios"
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
# Parsed once at import time; every flag below is read through 'args'.
args = parser.parse_args()
# Where stderr of the spawned rally processes goes: merged into stdout,
# or discarded to /dev/null (the two assignments sit in alternative
# branches — presumably keyed on the verbosity flag; condition not shown).
RALLY_STDERR = subprocess.STDOUT
RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

# Parsed functest YAML configuration; all paths/names below come from it.
functest_yaml = functest_utils.get_functest_yaml()

HOME = os.environ['HOME'] + "/"
# Directory layout for rally scenarios, templates and support files.
RALLY_DIR = REPO_PATH + '/' + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

# Defaults injected into the rally task arguments (see build_task_args).
FLAVOR_NAME = "m1.tiny"
ITERATIONS_AMOUNT = 10
RESULTS_DIR = functest_yaml.get("general").get("directories").get(
# Tempest configuration is probed for the live_migration capability flag.
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

# Network resources created for the rally run (see main()).
PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

# Glance image used by the scenarios (name, file, disk format, local path).
GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"

# Initialised in main(); read by build_task_args() to find the ext. network.
neutron_client = None
def get_task_id(cmd_raw):
    """
    get task id from command rally result

    :param cmd_raw: raw console output of the 'rally task start' command
    :return: task_id as string
    """
    # Rally announces each run with a line "Task <uuid>: started".
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        match = taskid_re.match(line)
            return match.group(1)
def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results

    :param json_raw: rally 'task results' output as a JSON string
    :return: presumably a boolean success flag — the failure/success
        return statements are not visible in this excerpt
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        # A missing report entry or missing 'result' section is a failure.
        if report is None or report.get('result') is None:
        for result in report.get('result'):
            # Any iteration carrying a non-empty 'error' list is a failure.
            if result is None or len(result.get('error')) > 0:
def live_migration_supported():
    """Read the live-migration capability flag from the tempest config.

    Returns the boolean value of [compute-feature-enabled]/live_migration
    in TEMPEST_CONF_FILE when the file is readable and the option exists
    (the fallback path for a missing file/option is not shown here).
    """
    config = iniparse.ConfigParser()
    # config.read() returns the list of files successfully parsed, so an
    # unreadable tempest.conf short-circuits the whole condition.
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')
def build_task_args(test_file_name):
    """Assemble the dict passed as --task-args to 'rally task start'.

    :param test_file_name: scenario module name (e.g. 'nova'), stored in
        the task's 'service_list'
    """
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY
    # full_mode/smoke toggling — the alternatives below sit in branches
    # whose conditions (presumably args.sanity) are not visible here.
        task_args['full_mode'] = False
        task_args['smoke'] = True
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke
    # Floating-IP scenarios need an external network; '' is the fallback.
    ext_net = os_utils.get_external_net(neutron_client)
        task_args['floating_network'] = str(ext_net)
        task_args['floating_network'] = ''
    # network_dict is created in main() before any task runs.
    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()
    # Derive the keystone base URL (everything before the port) from
    # OS_AUTH_URL for the 'requests' scenario.
    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
        task_args['request_url'] = ''
def get_output(proc, test_name):
    """Stream a running rally process, filter its console output, and
    accumulate per-scenario statistics into SUMMARY.

    :param proc: subprocess.Popen handle of the rally run (stdout=PIPE)
    :param test_name: scenario name, stored in the summary entry
    :return: the captured console output (fed to get_task_id by callers)
    """
    overall_duration = 0.0
    while proc.poll() is None:
        line = proc.stdout.readline()
            # Keep only the informative lines of rally's console output.
            if ("Load duration" in line or
                    "finished" in line or
                    " Preparing" in line or
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "| total" not in line):
        elif "| total" in line:
            # Success percentage sits in the 8th '|'-separated column of
            # the totals row, e.g. "... | 100.0% | ...".
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                success += float(percentage)
                logger.info('Percentage error: %s, %s' % (percentage, line))
        elif "Full duration" in line:
            duration = line.split(': ')[1]
                overall_duration += float(duration)
                logger.info('Duration error: %s, %s' % (duration, line))
    # Format totals once the process has exited.
    overall_duration = "{:10.2f}".format(overall_duration)
        success_avg = "{:0.2f}".format(success / nb_totals)
    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)
    logger.debug("\n" + result)
def get_cmd_output(proc):
    """Drain stdout of *proc* line by line until the process terminates.

    :param proc: subprocess.Popen handle (stdout=PIPE)
    """
    while proc.poll() is None:
        line = proc.stdout.readline()
def run_task(test_name):
    """Launch rally for one scenario, collect its output and persist the
    HTML/JSON results (the "main" worker of this script).

    :param test_name: name of the rally scenario to run
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    # --abort-on-sla-failure stops rally as soon as an SLA criterion breaks.
    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    # No task id found in the output: re-run 'rally task validate' so the
    # underlying error surfaces in the logs.
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # NOTE(review): '%s' is a %-style placeholder but str.format() is
        # used, so RESULTS_DIR is never interpolated into this message.
        logger.debug('%s does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
    logger.debug('running command line : {}'.format(cmd_line))

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # Re-read what was just written to get the parsed structure.
    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
    # Body of main(): set up OpenStack resources, run the requested
    # scenario(s), print the summary table, push results, then clean up.
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # Reject unknown scenario names early.
    if not (args.test_name in tests):
        logger.error('argument not valid')

    # Ensure at least one cinder volume type exists for the cinder scenario.
    volume_types = os_utils.list_volume_types(cinder_client,
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
            logger.error("Failed to create volume type...")
            # NOTE(review): 'succesfully' is a typo inside a runtime log
            # string — left untouched here.
            logger.debug("Volume type '%s' created succesfully..."
                         % CINDER_VOLUME_TYPE_NAME)
        logger.debug("Using existing volume type(s)...")

    # image_exists is consulted during cleanup below.
    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,

    # Either iterate over all scenarios (skipping meta-entries) or run one.
    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    # Fixed-width ASCII summary table header.
              " Rally Summary Report\n"
              "+===================+============+===============+===========+"
              "| Module | Duration | nb. Test Run | Success |"
              "+===================+============+===============+===========+"

    stop_time = time.time()

    # for each scenario we draw a row for the table
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
            "| " + name + " | " + duration + " | " +
            nb_tests + " | " + success + "|\n" +
            "+-------------------+------------"
            "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    # Totals row: overall duration, test count and average success rate.
    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += ("| TOTAL: | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    # Test-case name depends on the mode (the two assignments below sit in
    # alternative branches — presumably keyed on args.sanity).
        case_name = "rally_sanity"
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = functest_utils.check_success_rate(case_name, success_rate)

        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",

    # Cleanup of the resources created above (image, volume type).
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
# Script entry point.
if __name__ == '__main__':