# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB

import argparse
import iniparse
import json
import logging
import os
import re
import subprocess
import sys
import time

import requests
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient
34 """ tests configuration """
35 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
36 'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
37 parser = argparse.ArgumentParser()
38 parser.add_argument("test_name",
39 help="Module name to be tested. "
40 "Possible values are : "
41 "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
42 "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
45 "performs all possible test scenarios"

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")

args = parser.parse_args()
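
# Illustrative usage (script name and paths depend on the deployment):
#   python run_rally.py nova          # run only the nova scenarios
#   python run_rally.py -s -r all     # smoke mode, push results to the DB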

# send rally's stderr to the console in verbose mode, silence it otherwise
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')
71 """ logging configuration """
72 logger = logging.getLogger("run_rally")
73 logger.setLevel(logging.DEBUG)
75 ch = logging.StreamHandler()
77 ch.setLevel(logging.DEBUG)
79 ch.setLevel(logging.INFO)
81 formatter = logging.Formatter("%(asctime)s - %(name)s - "
82 "%(levelname)s - %(message)s")
83 ch.setFormatter(formatter)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#     get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
# user/tenant/concurrency settings referenced by build_task_args();
# the exact values are assumptions, not part of this excerpt
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories"). \
    get("dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"

# globals shared with the helper functions below
SUMMARY = []
client_dict = {}
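
# The constants above assume a config_functest.yaml laid out roughly like
# this (illustrative sketch, not the authoritative schema):
#   general:
#     directories:
#       dir_rally_res: <rally results dir>
#       dir_results: <results root>
#       dir_functest_data: <data dir>
#     openstack:
#       neutron_private_net_name: <private net>
#       image_name: <glance image name>
#       image_file_name: <image file>
#       image_disk_format: <qcow2/raw/...>
#   results:
#     test_db_url: <test DB endpoint>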


def push_results_to_db(case, payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)
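
# For reference, the body POSTed to the test DB looks roughly like this
# (field values illustrative):
#   {"project_name": "functest", "case_name": "Rally",
#    "pod_name": "<pod>", "installer": "<installer>",
#    "version": "<scenario>", "details": {...}}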


def get_task_id(cmd_raw):
    """
    get task id from the rally command output
    :param cmd_raw: raw output of 'rally task start'
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None
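
# get_task_id() relies on rally printing a line such as (id illustrative):
#   Task 2b6a3ec4-9e1f-4c3a-8d6e-0a1b2c3d4e5f: started
# The captured group is the task id later fed to 'rally task report/results'.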


def task_succeed(json_raw):
    """
    Parse JSON from the rally JSON results
    :param json_raw: raw output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False
        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False
    return True
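
# 'rally task results' returns a list of scenario reports; each report holds
# a 'result' list whose items carry an 'error' list, e.g. (sketch):
#   [{"result": [{"error": [], ...}, ...], ...}]
# A missing 'result' or any non-empty 'error' marks the whole task as failed.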


def live_migration_supported():
    config = iniparse.ConfigParser()
    if config.read(TEMPEST_CONF_FILE) and \
       config.has_section('compute-feature-enabled') and \
       config.has_option('compute-feature-enabled', 'live_migration'):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False
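
# Example tempest.conf fragment that would turn live migration scenarios on
# (illustrative):
#   [compute-feature-enabled]
#   live_migration = True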


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args
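
# A resulting --task-args dictionary looks roughly like this (values
# illustrative):
#   {'service_list': ['nova'], 'smoke': False, 'image_name': '<image>',
#    'flavor_name': 'm1.tiny', 'users_amount': 2, 'tenants_amount': 3,
#    'iterations': 10, 'concurrency': 4, 'floating_network': '<ext net>',
#    'netid': '<uuid>', 'live_migration': False, ...}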


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if "| " in line and \
           "| action" not in line and \
           "| Starting" not in line and \
           "| Completed" not in line and \
           "| ITER" not in line and \
           "|   " not in line and \
           "| total" not in line:
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            success += float(percentage)
            nb_totals += 1
        elif "Full duration" in line:
            overall_duration += float(line.split(': ')[1])

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result
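
# get_output() counts the data rows of rally's summary table as tests and
# reads the success percentage from the '| total' row (field 8 once the line
# is split on '|'). Illustrative '| total' line:
#   | total | 4.9 | 5.1 | 5.3 | 5.5 | 5.8 | 5.3 | 100.0% | 10 |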


def run_task(test_name):
    #
    # the "main" function of the script: it launches rally for a given task
    # :param test_name: name of the rally test
    # :return: void
    #

    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
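
# run_task() shells out to the rally CLI; the generated commands look roughly
# like this (paths and ids illustrative):
#   rally task start --abort-on-sla-failure --task <scenarios dir>task.yaml \
#       --task-args "{'service_list': ['nova'], ...}"
#   rally task report <task_id> --out <results dir>opnfv-nova.html
#   rally task results <task_id>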


def main():
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)
425 " Rally Summary Report\n"\
426 "+===================+============+===============+===========+\n"\
427 "| Module | Duration | nb. Test Run | Success |\n"\
428 "+===================+============+===============+===========+\n"

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ""\
            "| " + name + " | " + duration + " | " + nb_tests + \
            " | " + success + "|\n"\
            "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')

    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
              total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests),
    #                 "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    if args.noclean:
        exit(0)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()