Push results without extra formatting
functest.git: testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
import argparse
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

""" tests configuration """
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")

args = parser.parse_args()

client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

# the 'with' block closes the file automatically; no explicit close is needed
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
### todo:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
### todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
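# Each run_task() call appends one scenario summary of the form
# {'test_name': ..., 'overall_duration': ..., 'nb_tests': ..., 'success': ...}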


def push_results_to_db(case, payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)

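# For illustration, the POSTed JSON has this shape (all values here are
# hypothetical and depend on the environment):
#   {"project_name": "functest", "case_name": "Rally",
#    "pod_name": "opnfv-pod1", "installer": "fuel",
#    "version": "os-nosdn-nofeature-ha", "details": [...]}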

def get_task_id(cmd_raw):
    """
    Get the task id from the raw output of a rally command
    :param cmd_raw: raw output of 'rally task start'
    :return: task_id as string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None

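# The regex above matches a Rally output line such as (task id hypothetical):
#   Task 6fd9a19f-5cf8-4f8d-9bf6-23a2c7e1b7a5: started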

def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results
    :param json_raw:
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True

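# Sketch of the structure this expects from "rally task results": a list of
# scenario reports, each holding per-iteration entries whose "error" list is
# empty on success, e.g. (hypothetical excerpt):
#   [{"result": [{"error": [], "duration": 1.23}, ...]}]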

def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)

    return task_args

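# The returned dict is handed to "rally task start --task-args", which Rally
# uses to render the task YAML templates; for example, build_task_args('nova')
# would yield something like (values depend on the local configuration):
#   {'service_list': ['nova'], 'smoke': False, 'flavor_name': 'm1.tiny', ...}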

def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0

    if args.verbose:
        while proc.poll() is None:
            line = proc.stdout.readline()
            print line.replace('\n', '')
            result += line
    else:
        while proc.poll() is None:
            line = proc.stdout.readline()
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
                if "| " in line and \
                   "| action" not in line and \
                   "|   " not in line and \
                   "| total" not in line:
                    nb_tests += 1
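                    # Column 8 of a Rally summary row holds the success
                    # rate; a matching row looks like (hypothetical values):
                    # | nova.boot | 1.2 | 1.4 | 1.8 | 1.9 | 2.0 | 1.5 | 100.0% | 10 |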
                    percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                    success += float(percentage)

            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"
                overall_duration += float(line.split(': ')[1])
        logger.info("\n" + result)
    overall_duration = "{:10.2f}".format(overall_duration)
    # guard against a ZeroDivisionError when no test row was parsed
    success_avg = success / nb_tests if nb_tests else 0.0
    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}

    SUMMARY.append(scenario_summary)
    return result


def run_task(test_name):
    #
    # the "main" function of the script, which launches Rally for a task
    # :param test_name: name for the rally test
    # :return: void
    #
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for the results directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug("Directory '{}' does not exist; creating it".format(
            RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push the detailed results as the testcase payload
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    """ parse JSON operation result """
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


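# Example invocation (a sketch; assumes the functest environment and the
# OpenStack credentials are already sourced):
#   python run_rally-cert.py nova --smoke --report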
def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error("Invalid argument: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    report = "\n"\
             "                                                              \n"\
             "                     Rally Summary Report\n"\
             "+===================+============+===============+===========+\n"\
             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
             "+===================+============+===============+===========+\n"
    payload = []

    # for each scenario we draw a row of the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ""\
            "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
            "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
        total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests), "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()