3 # Copyright (c) 2015 Orange
4 # guyrodrigue.koffi@orange.com
5 # morgan.richomme@orange.com
6 # All rights reserved. This program and the accompanying materials
7 # are made available under the terms of the Apache License, Version 2.0
8 # which accompanies this distribution, and is available at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # 0.1 (05/2015) initial commit
12 # 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
13 # 0.3 (19/10/2015) remove Tempest from run_rally
14 # and push result into test DB
27 from novaclient import client as novaclient
28 from glanceclient import client as glanceclient
29 from keystoneclient.v2_0 import client as keystoneclient
30 from neutronclient.v2_0 import client as neutronclient
31 from cinderclient import client as cinderclient
""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']

# Command-line interface: one positional scenario name plus boolean flags.
# NOTE(review): the closing lines of several add_argument() calls were lost
# in this chunk; the boolean flags must carry action="store_true" so that
# args.debug / args.report / args.smoke / args.verbose / args.noclean exist.
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")

args = parser.parse_args()
# Rally's stderr is shown only in debug mode; otherwise it is discarded.
# NOTE(review): the if/else structure is reconstructed — the two bare
# assignments visible in this chunk are its surviving branches.
if args.debug:
    RALLY_STDERR = subprocess.STDOUT
else:
    # deliberately kept open for the lifetime of the script; rally's
    # stderr is piped into it for every subprocess launched below
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

# console handler: DEBUG verbosity only when -d/--debug is given
ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
# attach the handler so records are actually emitted — the addHandler
# call fell into one of this chunk's gaps; restored here
logger.addHandler(ch)
REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils  # must come after sys.path is extended

""" global variables """
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#     get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"

# defaults handed to the rally task templates via build_task_args()
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2      # TODO(review): value reconstructed from a gap — confirm
TENANTS_AMOUNT = 3    # TODO(review): value reconstructed from a gap — confirm
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4       # TODO(review): value reconstructed from a gap — confirm

RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"

# accumulators shared with get_output() and the summary report below
# NOTE(review): these definitions fell into a gap of this chunk but are
# referenced later (SUMMARY.append, client_dict['neutron']); restored here
SUMMARY = []
client_dict = {}
def push_results_to_db(case, payload):
    """
    POST one result record to the functest results database.

    :param case: case name stored in the record (e.g. "Rally",
                 "Rally_details")
    :param payload: JSON-serialisable details attached to the record
    :return: the requests.Response of the POST (previously discarded)
    """
    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    # a timeout keeps an unreachable result DB from hanging the whole run
    r = requests.post(url, data=json.dumps(params), headers=headers,
                      timeout=60)
    return r
def get_task_id(cmd_raw):
    """
    Extract the rally task id from ``rally task start`` console output.

    Looks for a line of the form ``Task <id>: started``.

    :param cmd_raw: raw console output of the rally command
    :return: task id as a string, or None when no task-id line is found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        # strip the trailing newline (and any stray whitespace) so the
        # anchored regex can match
        match = taskid_re.match(line.strip())
        if match:
            return match.group(1)
    # no "Task ...: started" line at all (e.g. rally failed to launch)
    return None
def task_succeed(json_raw):
    """
    Parse raw JSON from rally results and decide overall success.

    :param json_raw: JSON text as produced by ``rally task results``
    :return: True when every scenario iteration completed without error,
             False otherwise (including malformed/missing result entries)
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False
        for result in report.get('result'):
            if result is None:
                return False
            # truthiness check instead of len(...) > 0: also tolerates a
            # missing 'error' key, which would previously raise TypeError
            if result.get('error'):
                return False
    return True
def build_task_args(test_file_name):
    """
    Build the --task-args dict passed to a rally task template.

    :param test_file_name: name of the scenario (becomes the service list)
    :return: dict of template arguments (image, flavor, amounts, networks)
    """
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    # floating network: empty string when no external network is available
    # (both assignment branches are visible in the chunk; the if/else
    # structure is restored)
    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)
    return task_args
def get_output(proc, test_name):
    """
    Consume the stdout of a running ``rally task start`` process.

    Filters rally's console chatter into a readable log and accumulates
    per-scenario counters (tests run, success percentage, duration) into
    the module-level SUMMARY list.

    :param proc: subprocess.Popen whose stdout is rally's console output
    :param test_name: scenario name recorded in the SUMMARY entry
    :return: the filtered console output as one string
    """
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            # keep only the informative lines of rally's output
            # NOTE(review): the last two filter terms are reconstructed
            # from a gap in this chunk — confirm against the full file
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            # the 8th '|'-separated column of the totals row is the
            # success rate, e.g. "| total | ... | 100.0% | ..."
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            success += float(percentage)
            nb_totals += 1
        elif "Full duration" in line:
            overall_duration += float(line.split(': ')[1])

    overall_duration = "{:10.2f}".format(overall_duration)
    # guard: a crashed scenario may produce no totals row at all —
    # avoid ZeroDivisionError
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    # callers (run_task) feed this to get_task_id()
    return result
def run_task(test_name):
    """
    The "main" function of the script: launch rally for one scenario.

    Runs ``rally task start``, saves the HTML and JSON reports under
    RESULTS_DIR, optionally pushes the details to the result DB, and logs
    the overall verdict.

    :param test_name: name of the rally scenario (e.g. "nova")
    :return: None; exits the process on fatal errors
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        # bug fix: the original mixed a '%s' placeholder with str.format(),
        # so RESULTS_DIR was never substituted into the message
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
# ---------------------------------------------------------------------------
# Script body: validate the CLI argument, build the OpenStack clients,
# prepare shared test resources (volume type, glance image) and run the
# requested scenario(s).
# NOTE(review): in the complete file these statements form the body of a
# main() function whose def line falls in a gap of this chunk.
# ---------------------------------------------------------------------------
if not (args.test_name in tests):
    logger.error('argument not valid')
    exit(-1)

client_dict = {}

creds_nova = functest_utils.get_credentials("nova")
nova_client = novaclient.Client('2', **creds_nova)
creds_neutron = functest_utils.get_credentials("neutron")
neutron_client = neutronclient.Client(**creds_neutron)
creds_keystone = functest_utils.get_credentials("keystone")
keystone_client = keystoneclient.Client(**creds_keystone)
glance_endpoint = keystone_client.service_catalog.url_for(
    service_type='image', endpoint_type='publicURL')
glance_client = glanceclient.Client(1, glance_endpoint,
                                    token=keystone_client.auth_token)
creds_cinder = functest_utils.get_credentials("cinder")
cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                    creds_cinder['api_key'],
                                    creds_cinder['project_id'],
                                    creds_cinder['auth_url'],
                                    service_type="volume")

# build_task_args() reads the neutron client from this shared dict
client_dict['neutron'] = neutron_client

# make sure at least one (public) volume type exists for the cinder tests
volume_types = functest_utils.list_volume_types(cinder_client,
                                                private=False)
if not volume_types:
    volume_type = functest_utils.create_volume_type(cinder_client,
                                                    CINDER_VOLUME_TYPE_NAME)
    if not volume_type:
        logger.error("Failed to create volume type...")
        exit(-1)
    else:
        logger.debug("Volume type '%s' created succesfully..."
                     % CINDER_VOLUME_TYPE_NAME)
else:
    logger.debug("Using existing volume type(s)...")

# make sure the glance image used by the scenarios exists
image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
if image_id == '':
    logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH))
    image_id = functest_utils.create_glance_image(glance_client,
                                                  GLANCE_IMAGE_NAME,
                                                  GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create the Glance image...")
        exit(-1)
    else:
        logger.debug("Image '%s' with ID '%s' created succesfully ."
                     % (GLANCE_IMAGE_NAME, image_id))
else:
    logger.debug("Using existing image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))

if args.test_name == "all":
    for test_name in tests:
        # NOTE(review): exclusion list reconstructed from a gap — confirm
        # ('all' is the meta-value; 'vm' is skipped from the full run)
        if not (test_name == 'all' or
                test_name == 'vm'):
            run_task(test_name)
else:
    logger.debug("Test name: " + args.test_name)
    run_task(args.test_name)
# Build the human-readable summary table and the DB payload from the
# per-scenario entries accumulated in SUMMARY by get_output().
# NOTE(review): the opening lines of the report string, the accumulator
# initialisations and the `for s in SUMMARY:` header fell into gaps of
# this chunk; restored here.  Column widths follow the {0:<17}/{0:<10}/
# {0:<13}/{0:<10} cell formats used below.
report = "\n"\
         "                     Rally Summary Report\n"\
         "\n"\
         "+===================+============+===============+===========+\n"\
         "| Module            | Duration   | nb. Test Run  | Success   |\n"\
         "+===================+============+===============+===========+\n"

payload = []

# for each scenario we draw a row for the table
total_duration = 0.0
total_nb_tests = 0
total_success = 0.0
for s in SUMMARY:
    name = "{0:<17}".format(s['test_name'])
    duration = float(s['overall_duration'])
    total_duration += duration
    duration = time.strftime("%M:%S", time.gmtime(duration))
    duration = "{0:<10}".format(duration)
    nb_tests = "{0:<13}".format(s['nb_tests'])
    total_nb_tests += int(s['nb_tests'])
    success = "{0:<10}".format(str(s['success']) + '%')
    total_success += float(s['success'])
    report += ("| " + name + " | " + duration + " | " + nb_tests +
               " | " + success + "|\n" +
               "+-------------------+------------+---------------+-----------+\n")
    payload.append({'module': name, 'duration': duration,
                    'nb tests': nb_tests, 'success': success})

total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
total_duration_str2 = "{0:<10}".format(total_duration_str)
total_nb_tests_str = "{0:<13}".format(total_nb_tests)
# guard: dividing by len(SUMMARY) raises ZeroDivisionError when no
# scenario produced a summary entry
if SUMMARY:
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
else:
    total_success = "{:0.2f}".format(0.0)
total_success_str = "{0:<10}".format(str(total_success) + '%')
report += "+===================+============+===============+===========+\n"
report += "| TOTAL:            | " + total_duration_str2 + " | " + \
          total_nb_tests_str + " | " + total_success_str + "|\n"
report += "+===================+============+===============+===========+\n"

logger.info("\n" + report)
payload.append({'summary': {'duration': total_duration_str2,
                            'nb tests': total_nb_tests_str,
                            'nb success': total_success_str}})
# Generate json results for DB
# json_results = {"timestart": time_start, "duration": total_duration,
#                 "tests": int(total_nb_tests), "success": int(total_success)}
# logger.info("Results: "+str(json_results))

# NOTE(review): the args.report / args.noclean guards fell into gaps of
# this chunk; restored — pushing to the DB is opt-in (-r) and cleanup is
# skipped with -n.
if args.report:
    logger.debug("Pushing Rally summary into DB...")
    push_results_to_db("Rally", payload)

if args.noclean:
    exit(0)

# clean up the resources created for the run
logger.debug("Deleting image '%s' with ID '%s'..."
             % (GLANCE_IMAGE_NAME, image_id))
if not functest_utils.delete_glance_image(nova_client, image_id):
    logger.error("Error deleting the glance image")

# NOTE(review): volume_type is only bound when the type was created above;
# with pre-existing volume types this delete would raise NameError — confirm
# intended behaviour against the full file
logger.debug("Deleting volume type '%s'..."
             % CINDER_VOLUME_TYPE_NAME)
if not functest_utils.delete_volume_type(cinder_client, volume_type):
    logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    # NOTE(review): main() is defined earlier in the full file; its def
    # line falls outside this chunk
    main()