remove "print" in tempest to avoid having a message "None" in the output
functest.git: testcases/VIM/OpenStack/CI/libraries/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
import argparse
import iniparse
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

34 """ tests configuration """
35 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
36          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
37 parser = argparse.ArgumentParser()
38 parser.add_argument("test_name",
39                     help="Module name to be tested. "
40                          "Possible values are : "
41                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
42                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
43                          "{d[10]} ] "
44                          "The 'all' value "
45                          "performs all possible test scenarios"
46                          .format(d=tests))
47
48 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
49 parser.add_argument("-r", "--report",
50                     help="Create json result file",
51                     action="store_true")
52 parser.add_argument("-s", "--smoke",
53                     help="Smoke test mode",
54                     action="store_true")
55 parser.add_argument("-v", "--verbose",
56                     help="Print verbose info about the progress",
57                     action="store_true")
58 parser.add_argument("-n", "--noclean",
59                     help="Don't clean the created resources for this test.",
60                     action="store_true")
61
62 args = parser.parse_args()
63
client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

# the 'with' statement closes the file; no explicit close() is needed
with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
### TODO:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
### TODO:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories"). \
    get("dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def push_results_to_db(case, payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO: pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}
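    # Illustrative request body (values assumed, e.g. installer "fuel"):
    #   {"project_name": "functest", "case_name": "Rally",
    #    "pod_name": "<pod>", "installer": "fuel",
    #    "version": "<scenario>", "details": {...}}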

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the raw output of the Rally command.
    :param cmd_raw: raw output of 'rally task start'
    :return: task_id as a string, or None if not found
    """
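    # Illustrative input (id value assumed): Rally prints a line such as
    #   "Task 6a96e11c-0133-4e55-98fa-f2adcf98c358: started"
    # and the regex below captures the id between "Task " and ": started".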
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON output of a Rally task and check it for errors.
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
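    # Illustrative shape of the parsed report (assumed from the checks below):
    #   [{"result": [{"error": []}, ...]}, ...]
    # A missing 'result' key or a non-empty 'error' list means failure.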
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
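    # Illustrative tempest.conf snippet that makes this return True:
    #   [compute-feature-enabled]
    #   live_migration = True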
    config = iniparse.ConfigParser()
    if config.read(TEMPEST_CONF_FILE) and \
       config.has_section('compute-feature-enabled') and \
       config.has_option('compute-feature-enabled', 'live_migration'):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
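    # Builds the dict passed to Rally via --task-args; illustrative result
    # (values assumed): {'service_list': ['nova'], 'smoke': False,
    #  'image_name': '<image>', 'flavor_name': 'm1.tiny', ...}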
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

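    # Illustrative Rally console output parsed below (format assumed):
    #   Full duration: 12.34              -> summed into overall_duration
    #   | total | ... | 100.0% | ... |    -> success rate read via
    #                                        line.split('|')[8]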
    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if "| " in line and \
           "| action" not in line and \
           "| Starting" not in line and \
           "| Completed" not in line and \
           "| ITER" not in line and \
           "|   " not in line and \
           "| total" not in line:
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            success += float(percentage)
            nb_totals += 1
        elif "Full duration" in line:
            overall_duration += float(line.split(': ')[1])

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def run_task(test_name):
    """
    Main function of the script: it launches Rally for a given task.
    :param test_name: name of the Rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
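    # Illustrative resulting command (values assumed):
    #   rally task start --abort-on-sla-failure \
    #     --task <SCENARIOS_DIR>/task.yaml \
    #     --task-args "{'service_list': ['nova'], 'smoke': False, ...}"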
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error('Invalid test name "%s"' % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = "\n"\
             "                                                              \n"\
             "                     Rally Summary Report\n"\
             "+===================+============+===============+===========+\n"\
             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
             "+===================+============+===============+===========+\n"
    payload = []

    # for each scenario, draw a row in the summary table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ""\
            "| " + name + " | " + duration + " | " + nb_tests + " | " + success + "|\n"\
            "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
              total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # generate json results for the DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests), "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    if args.noclean:
        exit(0)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()