Merge "Refactor rally task scenario files"
[functest-xtesting.git] / testcases / OpenStack / rally / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
16 """ tests configuration """

import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()

HOME = os.environ['HOME'] + "/"
RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
            ft_utils.get_functest_config('general.directories.dir_rally')
SANITY_MODE_DIR = RALLY_DIR + "scenario/sanity"
FULL_MODE_DIR = RALLY_DIR + "scenario/full"
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
TEMP_DIR = RALLY_DIR + "var"
BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = \
    ft_utils.get_functest_config('general.directories.dir_rally_res')
TEMPEST_CONF_FILE = \
    ft_utils.get_functest_config('general.directories.dir_results') + \
    '/tempest/tempest.conf'
TEST_DB = ft_utils.get_functest_config('results.test_db_url')

PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')

GLANCE_IMAGE_NAME = \
    ft_utils.get_functest_config('general.openstack.image_name')
GLANCE_IMAGE_FILENAME = \
    ft_utils.get_functest_config('general.openstack.image_file_name')
GLANCE_IMAGE_FORMAT = \
    ft_utils.get_functest_config('general.openstack.image_disk_format')
GLANCE_IMAGE_PATH = \
    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
    "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' command output.
    :param cmd_raw: raw output of the command
    :return: task id as a string, or None if it cannot be found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a Rally task.
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if every iteration succeeded, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
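    """
    Check whether the Tempest configuration advertises live migration
    support (compute-feature-enabled.live_migration); default to False.
    """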
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
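    """
    Build the '--task-args' dict handed over to the Rally task: image,
    flavor, network, user/tenant amounts, iterations and concurrency.
    """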
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['smoke'] = True
    else:
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
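    """
    Stream the Rally console output, keep only the relevant lines for
    logging and accumulate per-scenario statistics (number of tests,
    success rate, duration) into the global SUMMARY list.
    """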
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
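    """Read stdout from a subprocess until it exits and return it."""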
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def apply_blacklist(case_file_name, result_file_name):
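    """
    Write a copy of the scenario file that skips the test cases
    blacklisted for the current installer and deploy scenario.
    """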
    logger.debug("Applying blacklist...")
    cases_file = open(case_file_name, 'r')
    result_file = open(result_file_name, 'w')
    black_tests = []

    try:
        installer_type = os.getenv('INSTALLER_TYPE')
        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
        if installer_type and deploy_scenario:
            # INSTALLER_TYPE and DEPLOY_SCENARIO are set: read the blacklist
            with open(BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            for item in black_list_yaml:
                scenarios = item['scenarios']
                installers = item['installers']
                if (deploy_scenario in scenarios and
                        installer_type in installers):
                    tests = item['tests']
                    black_tests.extend(tests)
    except Exception:
        black_tests = []
        logger.debug("Blacklisting not applied.")

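    # Copy the scenario file line by line; when a line matches a blacklisted
    # test name, stop copying until the next blank line so the whole case
    # block is skipped.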
    include = True
    for cases_line in cases_file:
        if include:
            for black_tests_line in black_tests:
                if black_tests_line == cases_line.strip().rstrip(':'):
                    include = False
                    break
            else:
                result_file.write(str(cases_line))
        else:
            if cases_line.isspace():
                include = True

    cases_file.close()
    result_file.close()


def prepare_test_list(test_name):
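    """
    Resolve the scenario file for 'test_name' (top level, sanity or full
    mode) and write a blacklist-filtered copy of it into TEMP_DIR.
    """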
    scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                  test_name)
    if not os.path.exists(scenario_file_name):
        if args.sanity:
            scenario_file_name = '{}opnfv-{}.yaml'.format(SANITY_MODE_DIR +
                                                          "/", test_name)
        else:
            scenario_file_name = '{}opnfv-{}.yaml'.format(FULL_MODE_DIR +
                                                          "/", test_name)
        if not os.path.exists(scenario_file_name):
            logger.error("The scenario '%s' does not exist."
                         % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
    test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)


def run_task(test_name):
    #
    # The "main" function of the script: it launches Rally for a given task.
    # :param test_name: name of the rally test
    # :return: void
    #
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    prepare_test_list(test_name)

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push the detailed results into the test DB
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest",
                                    "Rally_details",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_data)


def main():
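    """
    Prepare the OpenStack resources (volume type, image, network), run the
    selected Rally scenario(s), build the summary report and optionally
    push the results to the test DB.
    """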
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # validate the test name argument
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()