Merge "Add blacklist handling for rally test cases"
functest-xtesting.git: testcases/OpenStack/rally/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
""" tests configuration """

import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as os_utils
from functest.utils.functest_utils import FUNCTEST_REPO as REPO_PATH

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

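# typical invocations (illustrative):
#   python run_rally-cert.py nova           # run only the nova scenarios
#   python run_rally-cert.py -z -r all      # sanity mode, report to test DB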
args = parser.parse_args()

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()


functest_yaml = functest_utils.get_functest_yaml()

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + '/' + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
TEMP_DIR = RALLY_DIR + "var"
BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' console output
    :param cmd_raw:
    :return: task_id as string
    """
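    # rally is assumed to print a line such as
    #   "Task  2fba148c-cc11-4658-aac1-34f8d4dca2e1: started"
    # when the task launches; that is the line the regex below matches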
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a rally task and check for errors
    :param json_raw:
    :return: Bool
    """
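    # a rally result file is assumed to be a list of scenario reports,
    # each holding a 'result' list whose items carry an 'error' list,
    # e.g. [{"result": [{"error": [], ...}, ...], ...}, ...]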
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
    """Check in tempest.conf whether live migration is enabled."""
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

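    # OS_AUTH_URL typically looks like "http://10.0.0.1:5000/v2.0";
    # dropping everything after the last ':' leaves scheme and host
    # ("http://10.0.0.1"), which the 'requests' scenarios presumably use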
    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
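            # in the rally summary table, the "total" row is assumed to
            # follow the default column layout (action, min, median,
            # 90%ile, 95%ile, max, avg, success, count), so field 8 of
            # the '|' split should hold the success column, e.g. "100.0%"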
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def apply_blacklist(case_file_name, result_file_name):
    logger.debug("Applying blacklist...")
    cases_file = open(case_file_name, 'r')
    result_file = open(result_file_name, 'w')
    black_tests = []
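    # blacklist.txt is expected to be YAML shaped like the (illustrative)
    # snippet below; the structure is inferred from the parsing that
    # follows:
    #
    # -
    #     scenarios:
    #         - os-odl_l2-bgpvpn-ha
    #     installers:
    #         - fuel
    #     tests:
    #         - NovaServers.boot_and_live_migrate_server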

    try:
        installer_type = os.getenv('INSTALLER_TYPE')
        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
        if installer_type and deploy_scenario:
            # read the blacklist only if both INSTALLER_TYPE and
            # DEPLOY_SCENARIO are set
            with open(BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            for item in black_list_yaml:
                scenarios = item['scenarios']
                installers = item['installers']
                if (deploy_scenario in scenarios and
                        installer_type in installers):
                    tests = item['tests']
                    black_tests.extend(tests)
    except Exception:
        black_tests = []
        logger.debug("Blacklisting not applied.")

    include = True
    for cases_line in cases_file:
        if include:
            for black_tests_line in black_tests:
                if black_tests_line == cases_line.strip().rstrip(':'):
                    include = False
                    break
            else:
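                # this 'else' belongs to the 'for' loop above: it runs
                # only when no blacklisted test matched the current line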
                result_file.write(str(cases_line))
        else:
            if cases_line.isspace():
                include = True

    cases_file.close()
    result_file.close()


def prepare_test_list(test_name):
    scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                  test_name)
    if not os.path.exists(scenario_file_name):
        logger.info("The scenario '%s' does not exist." % scenario_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
    test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)


def run_task(test_name):
    """
    The "main" function of the script: launches rally for one task
    :param test_name: name of the rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    prepare_test_list(test_name)

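    # --abort-on-sla-failure tells rally to stop the task as soon as one
    # of the SLA criteria defined in the scenario files fails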
    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if args.test_name not in tests:
        logger.error('Argument not valid: %s' % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
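    # the overall success rate is the plain mean of the per-scenario
    # success percentages; it is not weighted by the number of tests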
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = functest_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()