Merge "Remove obsolete public network definitions"
[functest-xtesting.git] / testcases / VIM / OpenStack / CI / libraries / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
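# Example invocation (illustrative only; see the argparse definitions
# below for the full option list):
#   python run_rally-cert.py nova --smoke --report
#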
import argparse
import json
import logging
import os
import re
import subprocess
import sys
import time

import requests
import yaml

from cinderclient import client as cinderclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from novaclient import client as novaclient

33 """ tests configuration """
34 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
35          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
36 parser = argparse.ArgumentParser()
37 parser.add_argument("test_name",
38                     help="Module name to be tested. "
39                          "Possible values are : "
40                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
41                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
42                          "{d[10]} ] "
43                          "The 'all' value "
44                          "performs all possible test scenarios"
45                          .format(d=tests))
46
47 parser.add_argument("-d", "--debug", help="Debug mode",  action="store_true")
48 parser.add_argument("-r", "--report",
49                     help="Create json result file",
50                     action="store_true")
51 parser.add_argument("-s", "--smoke",
52                     help="Smoke test mode",
53                     action="store_true")
54 parser.add_argument("-v", "--verbose",
55                     help="Print verbose info about the progress",
56                     action="store_true")
57
58 args = parser.parse_args()
59
client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = logging.getLogger("run_rally")
logger.setLevel(logging.DEBUG)

ch = logging.StreamHandler()
if args.debug:
    ch.setLevel(logging.DEBUG)
else:
    ch.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s - %(name)s - "
                              "%(levelname)s - %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)
sys.path.append(REPO_PATH + "testcases/")
import functest_utils

with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
# TODO: read the scenarios directory from config_functest.yaml instead:
# SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general"). \
#     get("directories").get("dir_rally_scn")
SCENARIOS_DIR = REPO_PATH + "testcases/VIM/OpenStack/CI/rally_cert/"
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"
# TODO: make the scenario parameters below configurable
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories"). \
    get("dir_rally_res")
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general"). \
    get("openstack").get("neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
    get("openstack").get("image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
    get("openstack").get("image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
    get("openstack").get("image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
    get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


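# filled by get_output(): one summary dict per scenario run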
SUMMARY = []


def push_results_to_db(payload):
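    """
    Push the JSON payload of a rally run to the results database.
    The installer, scenario and pod name are resolved at runtime via
    functest_utils.
    :param payload: 'details' dict stored with the result
    """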
    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    pod_name = functest_utils.get_pod_name(logger)
    # TODO: pod_name should eventually come from Jenkins
    params = {"project_name": "functest", "case_name": "Rally",
              "pod_name": pod_name, "installer": installer,
              "version": scenario, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' output.
    :param cmd_raw: raw rally command output
    :return: task id as a string, or None if no task id was found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a rally task and check for errors.
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def build_task_args(test_file_name):
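    """
    Build the dict of task arguments passed to rally via --task-args;
    the keys are consumed as template variables by the scenario yaml
    files under SCENARIOS_DIR.
    :param test_file_name: name of the module under test
    :return: dict of task arguments
    """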
    task_args = {'service_list': [test_file_name]}
    task_args['smoke'] = args.smoke
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    ext_net = functest_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = functest_utils.get_network_id(client_dict['neutron'],
                                           PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)

    return task_args


def get_output(proc, test_name):
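    """
    Read the rally process output line by line. In verbose mode the
    whole output is echoed; otherwise only the summary-table lines are
    kept. Per-scenario statistics (number of tests, success rate,
    duration) are accumulated into the global SUMMARY list.
    :param proc: running rally subprocess
    :param test_name: name of the scenario being executed
    :return: the filtered rally output as a single string
    """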
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0

    if args.verbose:
        while proc.poll() is None:
            line = proc.stdout.readline()
            print line.replace('\n', '')
            result += line
    else:
        while proc.poll() is None:
            line = proc.stdout.readline()
            if "Load duration" in line or \
               "started" in line or \
               "finished" in line or \
               " Preparing" in line or \
               "+-" in line or \
               "|" in line:
                result += line
                if "| " in line and \
                   "| action" not in line and \
                   "|   " not in line and \
                   "| total" not in line:
                    nb_tests += 1
                    percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                    success += float(percentage)

            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"
                overall_duration += float(line.split(': ')[1])
        logger.info("\n" + result)

    overall_duration = "{:10.2f}".format(overall_duration)
    # avoid a division by zero when no test line could be parsed
    success_avg = success / nb_tests if nb_tests else 0.0
    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}

    SUMMARY.append(scenario_summary)
    return result


def run_task(test_name):
    """
    Launch rally for one task, collect the output and check the result.
    :param test_name: name of the rally test scenario
    :return: void
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = "rally task start --abort-on-sla-failure " + \
               "--task {} ".format(task_file) + \
               "--task-args \"{}\" ".format(build_task_args(test_name))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error("failed to retrieve task_id")
        exit(-1)

    # check for the results directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug("Directory '{}' does not exist, "
                     "creating it...".format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write the html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)
    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # push the results into the payload of the testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db(json_data)

314     """ parse JSON operation result """
315     if task_succeed(json_results):
316         logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
317     else:
318         logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
319
320 def push_results_to_db(payload):
321     # TODO
322     pass
323
324
def main():
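    """
    Create the prerequisites (volume type, guest image), run the
    requested rally scenario(s), print the summary report and clean up
    the created resources.
    """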
    global SUMMARY
    # check that the requested test name is valid
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = functest_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = functest_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = functest_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = functest_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

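    # build_task_args() uses this neutron client to look up the external
    # (floating) network and the private network id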
    volume_types = functest_utils.list_volume_types(cinder_client,
                                                    private=False)
    if not volume_types:
        volume_type = functest_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..."
                     % (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
        image_id = functest_utils.create_glance_image(glance_client,
                                                      GLANCE_IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        print(args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              \n"
              "                     Rally Summary Report\n"
              "+===================+============+===============+===========+\n"
              "| Module            | Duration   | nb. Test Run  | Success   |\n"
              "+===================+============+===============+===========+\n")

    # for each scenario we draw a row in the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------+---------------+"
                   "-----------+\n")

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+\n"

    logger.info("\n" + report)


    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                 "tests": int(total_nb_tests),
    #                 "success": int(total_success)}
    # logger.info("Results: " + str(json_results))

    # if args.report:
    #     logger.debug("Pushing result into DB...")
    #     push_results_to_db(json_results)


    logger.debug("Deleting image '%s' with ID '%s'..."
                 % (GLANCE_IMAGE_NAME, image_id))
    if not functest_utils.delete_glance_image(nova_client, image_id):
        logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not functest_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()