Add exception handling for output processing
[functest.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
import argparse
import iniparse
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")

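# Example invocations (illustrative):
#   python run_rally-cert.py nova             # run only the Nova scenarios
#   python run_rally-cert.py --smoke --report all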
args = parser.parse_args()

client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir']+'/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME']+"/"
### todo:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
### todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories"). \
    get("dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def push_results_to_db(case, payload):
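    """
    Push the payload of a test case to the result database (TEST_DB).
    :param case: test case name, e.g. "Rally" or "Rally_details"
    :param payload: dict or list with the detailed results
    :return: void
    """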

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO: pod_name info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the Rally command output.
    :param cmd_raw: raw output of the 'rally task start' command
    :return: task_id as string
    """
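    # Rally prints a line such as "Task 6a1b2c3d-...: started" when a task
    # begins (illustrative format); the regex below extracts that task id.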
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the Rally JSON results and check for errors
    :param json_raw: raw JSON output of 'rally task results'
    :return: Bool
    """
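    # 'rally task results' returns a list of scenario reports; each report
    # holds a 'result' list whose entries carry a non-empty 'error' field
    # when an iteration failed.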
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
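    """
    Read the Tempest configuration and return True if live migration is
    enabled in the [compute-feature-enabled] section, False otherwise
    (including when the file or the option is missing).
    """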
    config = iniparse.ConfigParser()
    if config.read(TEMPEST_CONF_FILE) and \
       config.has_section('compute-feature-enabled') and \
       config.has_option('compute-feature-enabled', 'live_migration'):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
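    """
    Build the task-args dictionary passed to 'rally task start' for the
    given scenario: image, flavor, template/support directories, amounts,
    concurrency, networks and live migration support.
    :param test_file_name: scenario name, e.g. 'nova'
    :return: dict of task arguments
    """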
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args


def get_output(proc, test_name):
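    """
    Read the Rally console output while the task is running, filter what
    gets displayed, and collect the per-scenario summary (number of tests,
    success rate, overall duration) into the global SUMMARY list.
    :param proc: the running 'rally task start' subprocess
    :param test_name: name of the scenario being run
    :return: the filtered output as a string
    """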
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
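        # Rally prints an ASCII summary table per scenario; a 'total' row
        # looks roughly like this (illustrative):
        # | total | 0.8 | 1.0 | 1.2 | 1.3 | 1.5 | 1.1 | 100.0% | 10 |
        # The success percentage is taken from split('|') index 8.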
        if "| " in line and \
           "| action" not in line and \
           "| Starting" not in line and \
           "| Completed" not in line and \
           "| ITER" not in line and \
           "|   " not in line and \
           "| total" not in line:
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def run_task(test_name):
    """
    The 'main' function of the script: it launches Rally for one task.
    :param test_name: name of the Rally test scenario
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
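    # The assembled command looks roughly like this (illustrative):
    #   rally task start --abort-on-sla-failure --task <SCENARIOS_DIR>task.yaml \
    #       --task-args "{'service_list': ['nova'], 'smoke': False, ...}"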
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
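    # Create the OpenStack clients needed to prepare the test resources
    # (Glance image, Cinder volume type) and to resolve the network ids
    # passed to the Rally task arguments.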
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(cinder_client,
                                                        CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

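    # Create the Glance image used by the Rally scenarios if it is not
    # already present on the deployment.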
    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = "\n"\
             "                                                              \n"\
             "                     Rally Summary Report\n"\
             "+===================+============+===============+===========+\n"\
             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
             "+===================+============+===============+===========+\n"
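    # payload collects the per-module details and is pushed to the result
    # database at the end of the run when --report is set.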
    payload = []

    # for each scenario we draw a row of the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success'])+'%')
        total_success += float(s['success'])
        report += ""\
        "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
        "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success)+'%')
    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
        total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests), "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    if args.noclean:
        exit(0)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()