Create Rally summary report also for verbose mode
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#                  and push result into test DB
#
import argparse
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")

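# Example invocations (a sketch; assumes a configured Rally/Functest host):
#   python run_rally-cert.py nova --smoke --verbose
#   python run_rally-cert.py all --report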
args = parser.parse_args()

client_dict = {}

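# Route Rally's stderr: in verbose mode merge it into the captured stdout,
# otherwise discard it in /dev/null.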
if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
### todo:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
### todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


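# Per-scenario summary rows, filled in by get_output() and rendered as the
# "Rally Summary Report" table in main().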
SUMMARY = []


def push_results_to_db(case, payload):

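    # POST the payload to the Functest result collection API
    # (TEST_DB URL taken from config_functest.yaml).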
    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO: pod_name is hardcoded; this info should come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' command output.
    :param cmd_raw: raw output of the command
    :return: task_id as a string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a Rally task and check them for errors.
    :param json_raw: raw JSON results of the task
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

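    # Resolve the external (floating) network and the private network id
    # so the scenarios can attach their instances to them.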
    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)

    return task_args


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

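    # Stream Rally's stdout line by line while the task runs; in verbose
    # mode keep the full output, otherwise keep only progress and table lines.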
    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
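        # Heuristics on Rally's ASCII result table: a "| " row that is not a
        # header, per-action or total row counts as one test; the "| total"
        # row carries the success percentage (field 8 after splitting on
        # '|'); "Full duration" gives the scenario wall-clock time.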
        if "| " in line and \
           "| action" not in line and \
           "| Starting" not in line and \
           "| Completed" not in line and \
           "| ITER" not in line and \
           "|   " not in line and \
           "| total" not in line:
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            success += float(percentage)
            nb_totals += 1
        elif "Full duration" in line:
            overall_duration += float(line.split(': ')[1])

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def run_task(test_name):
    """
    Launch Rally for the given test scenario and collect its results.
    :param test_name: name of the rally test scenario
    :return: None
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

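    # Rally's --abort-on-sla-failure flag stops the task early as soon as an
    # SLA criterion fails, instead of running all iterations to completion.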
    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # check for the result directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # push the details of the run into the testcase payload
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = "\n"\
             "                                                              \n"\
             "                     Rally Summary Report\n"\
             "+===================+============+===============+===========+\n"\
             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
             "+===================+============+===============+===========+\n"
    payload = []

    # for each scenario we draw a row of the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ""\
            "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
            "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name, 'duration': duration,
                        'nb tests': nb_tests, 'success': success})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
        total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration_str2,
                                'nb tests': total_nb_tests_str,
                                'nb success': total_success_str}})

    # generate json results for the DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests), "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()