Extend rally test case exclusion feature
testcases/OpenStack/rally/run_rally-cert.py (functest-xtesting.git)
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
16 """ tests configuration """

import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()


HOME = os.environ['HOME'] + "/"
RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
            ft_utils.get_functest_config('general.directories.dir_rally')
SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
FULL_MODE_DIR = RALLY_DIR + "scenario/full"
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
TEMP_DIR = RALLY_DIR + "var"
BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = \
    ft_utils.get_functest_config('general.directories.dir_rally_res')
TEMPEST_CONF_FILE = \
    ft_utils.get_functest_config('general.directories.dir_results') + \
    '/tempest/tempest.conf'
TEST_DB = ft_utils.get_functest_config('results.test_db_url')

PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')

GLANCE_IMAGE_NAME = \
    ft_utils.get_functest_config('general.openstack.image_name')
GLANCE_IMAGE_FILENAME = \
    ft_utils.get_functest_config('general.openstack.image_file_name')
GLANCE_IMAGE_FORMAT = \
    ft_utils.get_functest_config('general.openstack.image_disk_format')
GLANCE_IMAGE_PATH = \
    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
    "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' output.
    :param cmd_raw: raw output of the rally command
    :return: task_id as a string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a rally task.
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
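    """Return True if tempest.conf reports live migration as enabled."""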
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
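    """Build the dict passed to rally as --task-args for this scenario."""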
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['smoke'] = True
    else:
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
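    """
    Stream and filter the rally console output while the task runs, and
    collect the per-scenario summary (test count, duration, success rate)
    into SUMMARY.
    """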
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
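    """Read and return the whole stdout of a running subprocess."""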
266     result = ""
267
268     while proc.poll() is None:
269         line = proc.stdout.readline()
270         result += line
271
272     return result
273
274
def excl_scenario():
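    """
    Build the list of tests blacklisted for the current deploy scenario
    and installer (INSTALLER_TYPE and DEPLOY_SCENARIO env variables).

    The expected blacklist file layout, inferred from the parsing below
    (values are illustrative only):
        scenario:
            -
                scenarios:
                    - os-nosdn-nofeature-ha
                installers:
                    - fuel
                tests:
                    - NovaServers.boot_server
    """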
    black_tests = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        installer_type = os.getenv('INSTALLER_TYPE')
        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
        if installer_type and deploy_scenario:
            if 'scenario' in black_list_yaml:
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    if (deploy_scenario in scenarios and
                            installer_type in installers):
                        black_tests.extend(item['tests'])
    except Exception:
        logger.debug("Scenario exclusion not applied.")

    return black_tests


def excl_func():
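    """
    Build the list of tests blacklisted because a functionality they
    need (currently only live migration) is not supported by the
    deployment under test.
    """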
    black_tests = []
    func_list = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        if not live_migration_supported():
            func_list.append("no_live_migration")

        if 'functionality' in black_list_yaml:
            for item in black_list_yaml['functionality']:
                functions = item['functions']
                for func in func_list:
                    if func in functions:
                        black_tests.extend(item['tests'])
    except Exception:
        logger.debug("Functionality exclusion not applied.")

    return black_tests


def apply_blacklist(case_file_name, result_file_name):
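    """
    Copy the scenario file into result_file_name, dropping every test
    block whose name matches a blacklist entry. A skipped block runs up
    to the next blank line.
    """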
324     logger.debug("Applying blacklist...")
325     cases_file = open(case_file_name, 'r')
326     result_file = open(result_file_name, 'w')
327
328     black_tests = list(set(excl_func() + excl_scenario()))
329
330     include = True
331     for cases_line in cases_file:
332         if include:
333             for black_tests_line in black_tests:
334                 if re.search(black_tests_line, cases_line.strip().rstrip(':')):
335                     include = False
336                     break
337             else:
338                 result_file.write(str(cases_line))
339         else:
340             if cases_line.isspace():
341                 include = True
342
343     cases_file.close()
344     result_file.close()
345
346
def prepare_test_list(test_name):
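    """
    Locate the scenario file for test_name (first in the common scenario
    directory, then in the sanity or full mode one), apply the blacklist
    and return the path of the resulting file under TEMP_DIR.
    """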
    scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                  test_name)
    if not os.path.exists(scenario_file_name):
        if args.sanity:
            scenario_file_name = '{}opnfv-{}.yaml'.format(SANITY_MODE_DIR +
                                                          "/", test_name)
        else:
            scenario_file_name = '{}opnfv-{}.yaml'.format(FULL_MODE_DIR +
                                                          "/", test_name)
        if not os.path.exists(scenario_file_name):
            logger.info("The scenario '%s' does not exist."
                        % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
    test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)
    return test_file_name


def file_is_empty(file_name):
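    """Return True if the file is missing or has a size of 0."""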
    try:
        if os.stat(file_name).st_size > 0:
            return False
    except OSError:
        pass

    return True


def run_task(test_name):
    """
    Launch rally for the given test scenario, save its reports and
    optionally push the detailed results to the test DB.
    :param test_name: name of the rally test scenario
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    file_name = prepare_test_list(test_name)
    if file_is_empty(file_name):
        logger.info('No tests for scenario "{}"'.format(test_name))
        return

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return
    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write the html report file and wait for its generation to complete
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    subprocess.call(cmd_line, shell=True)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)
    json_data = json.loads(json_results)

    # parse the JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push the results in the payload of the testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest",
                                    "Rally_details",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_data)


def main():
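    """Prepare the OpenStack resources, run the selected rally
    scenarios, then print and optionally push the summary report."""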
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if args.test_name not in tests:
        logger.error('Invalid test name: %s' % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)
526     report = ("\n"
527               "                                                              "
528               "\n"
529               "                     Rally Summary Report\n"
530               "\n"
531               "+===================+============+===============+===========+"
532               "\n"
533               "| Module            | Duration   | nb. Test Run  | Success   |"
534               "\n"
535               "+===================+============+===============+===========+"
536               "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row in the summary table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})
    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    if SUMMARY:
        success_rate = total_success / len(SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()