#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
import argparse
import iniparse
import json
import os
import re
import requests
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils

36 """ tests configuration """
37 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
38          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
39 parser = argparse.ArgumentParser()
40 parser.add_argument("test_name",
41                     help="Module name to be tested. "
42                          "Possible values are : "
43                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
44                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
45                          "{d[10]} ] "
46                          "The 'all' value "
47                          "performs all possible test scenarios"
48                          .format(d=tests))
49
50 parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
51 parser.add_argument("-r", "--report",
52                     help="Create json result file",
53                     action="store_true")
54 parser.add_argument("-s", "--smoke",
55                     help="Smoke test mode",
56                     action="store_true")
57 parser.add_argument("-v", "--verbose",
58                     help="Print verbose info about the progress",
59                     action="store_true")
60 parser.add_argument("-n", "--noclean",
61                     help="Don't clean the created resources for this test.",
62                     action="store_true")
63 parser.add_argument("-z", "--sanity",
64                     help="Sanity test mode, execute only a subset of tests",
65                     action="store_true")
66
67 args = parser.parse_args()
68
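# Example invocations (illustrative only; they assume the OpenStack
# credentials and the functest environment, e.g. $repos_dir and
# config_functest.yaml, are already in place):
#   python run_rally-cert.py nova --smoke --report
#   python run_rally-cert.py all --verbose --noclean
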
client_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
    functest_yaml = yaml.safe_load(f)

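# The constants below are resolved from config_functest.yaml (directories,
# OpenStack image/network names, results DB URL); the remaining values are
# fixed defaults used as Rally task arguments.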
HOME = os.environ['HOME'] + "/"
SCENARIOS_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally_scn")
TEMPLATE_DIR = SCENARIOS_DIR + "scenario/templates"
SUPPORT_DIR = SCENARIOS_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")
PRIVATE_NETWORK = functest_yaml.get("general").get("openstack").get(
    "neutron_private_net_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def push_results_to_db(case, payload, criteria):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    pod_name = functest_utils.get_pod_name(logger)

    # evaluate success criteria

    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": version, "scenario": scenario,
              "criteria": criteria, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


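# Note: get_task_id() relies on the 'rally task start' console output
# containing a line of the form 'Task <task-id>: started'; this is what the
# regular expression matches (the exact wording depends on the Rally version).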
def get_task_id(cmd_raw):
    """
    Get the task id from the rally command output
    :param cmd_raw:
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results
    :param json_raw:
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


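# build_task_args() returns the dict passed to Rally through --task-args.
# An illustrative (hypothetical) result for 'nova' in sanity mode:
#   {'service_list': ['nova'], 'image_name': 'functest-img',
#    'flavor_name': 'm1.tiny', 'full_mode': False, 'smoke': True,
#    'concurrency': 4, ...}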
def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = openstack_utils.get_network_id(client_dict['neutron'],
                                            PRIVATE_NETWORK)
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args


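# get_output() streams the Rally console output line by line: it keeps the
# progress and table lines for the log, counts the scenario rows of the
# result table, and averages the '| total |' success percentages into
# SUMMARY for the final report.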
def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


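# run_task() drives one scenario through the Rally CLI:
#   rally task start --abort-on-sla-failure --task <task.yaml> --task-args ...
#   rally task report <task_id> --out <html report>
#   rally task results <task_id>   (saved as opnfv-<test>.json)
# If no task id can be parsed from the output, the task definition is run
# through 'rally task validate' so that the validation errors get logged.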
def run_task(test_name):
    """
    Main function of the script: it launches Rally for a given task.
    :param test_name: name of the rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(SCENARIOS_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(SCENARIOS_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the results directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write the html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push the details payload of the test case into the DB
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data, status)


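# main() wires everything together: it builds the OpenStack clients, makes
# sure a volume type and a Glance image are available for the scenarios,
# runs the requested Rally task(s), prints the summary table, optionally
# pushes the results to the test DB and finally cleans up the resources it
# created (unless --noclean was given).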
def main():
    global SUMMARY
    # configure script
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average as a number so that the success criteria below can be
    # evaluated numerically; format it only for display
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format("{:0.2f}%".format(total_success))
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload, status)

    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting volume type...")


if __name__ == '__main__':
    main()