#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
import argparse
import json
import logging
import os
import re
import requests
import subprocess
import sys
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

33 """ tests configuration """
34 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
35          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
36 parser = argparse.ArgumentParser()
37 parser.add_argument("test_name",
38                     help="Module name to be tested. "
39                          "Possible values are : "
40                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
41                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
42                          "{d[10]} ] "
43                          "The 'all' value "
44                          "performs all possible test scenarios"
45                          .format(d=tests))
46
47 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
48 parser.add_argument("-r", "--report",
49                     help="Create json result file",
50                     action="store_true")
51 parser.add_argument("-s", "--smoke",
52                     help="Smoke test mode",
53                     action="store_true")
54 parser.add_argument("-v", "--verbose",
55                     help="Print verbose info about the progress",
56                     action="store_true")
57 parser.add_argument("-n", "--noclean",
58                     help="Don't clean the created resources for this test.",
59                     action="store_true")
60
61 args = parser.parse_args()
62
client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
### todo:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#    get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
###
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
### todo:
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

###
RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def push_results_to_db(case, payload):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO pod_name hardcoded, info shall come from Jenkins
    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}
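    # Shape of the resulting POST body (illustrative values only, not
    # taken from a real run):
    # {"project_name": "functest", "case_name": "Rally",
    #  "pod_name": "opnfv-jump-2", "installer": "fuel",
    #  "version": "os-nosdn-nofeature-ha", "details": {...}}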

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the rally command output
    :param cmd_raw:
    :return: task_id as string
    """
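    # Rally announces the task on stdout with a line like
    # "Task 6fd9a19f-...: started" (format observed in rally 0.x and
    # assumed stable here; the regex below encodes that assumption).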
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the rally json results and check that no scenario failed
    :param json_raw:
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

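    # The scenario templates receive the name of the external network for
    # floating IP allocation; fall back to an empty string when none is
    # found (assumption: the scenarios treat '' as "no floating network").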
    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)

    return task_args


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

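    # Stream the rally stdout line by line while the task runs: keep the
    # full output in verbose mode, otherwise keep only the progress and
    # result-table lines that matter for the log.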
    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
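        # Rally prints its per-scenario results as an ASCII table whose
        # columns are (layout observed in rally 0.x, assumed here):
        # | action | min | median | 90%ile | 95%ile | max | avg | success | count |
        # so splitting the "total" row on '|' puts the success percentage
        # at index 8.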
        if "| " in line and \
           "| action" not in line and \
           "| Starting" not in line and \
           "| Completed" not in line and \
           "| ITER" not in line and \
           "|   " not in line and \
           "| total" not in line:
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            success += float(percentage)
            nb_totals += 1
        elif "Full duration" in line:
            overall_duration += float(line.split(': ')[1])

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def run_task(test_name):
    """
    Main function of the script: it launches rally for a given task
    :param test_name: name of the rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

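    # --abort-on-sla-failure makes rally stop the task as soon as an SLA
    # criterion fails, so a broken deployment does not burn the full
    # iteration budget.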
    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error("Failed to retrieve task_id.")
        exit(-1)

    # create the result directory if it does not exist yet
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # push the results into the payload of the testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data)

    # parse JSON operation result
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")


def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error('Invalid test name: {}'.format(args.test_name))
        exit(-1)

    SUMMARY = []
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
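    # Resolve the public Glance endpoint from the Keystone service catalog
    # and reuse the Keystone token to authenticate the Glance v1 client.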
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

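    # 'all' runs every module except 'vm', which is only executed when
    # requested explicitly.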
    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = "\n"\
             "                                                              \n"\
             "                     Rally Summary Report\n"\
             "+===================+============+===============+===========+\n"\
             "| Module            | Duration   | nb. Test Run  | Success   |\n"\
             "+===================+============+===============+===========+\n"
    payload = []

    # for each scenario we draw a row in the summary table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += "" \
                  "| " + name + " | " + duration + " | " + nb_tests + \
                  " | " + success + "|\n" \
                  "+-------------------+------------+---------------+-----------+\n"
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+\n"
    report += "| TOTAL:            | " + total_duration_str2 + " | " + \
        total_nb_tests_str + " | " + total_success_str + "|\n"
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests), "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload)

    if args.noclean:
        exit(0)

    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the Glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")


if __name__ == '__main__':
    main()