#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#                  and push result into test DB
#
16 """ tests configuration """

import json
import os
import re
import subprocess
import time

import argparse
import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "runs every test scenario."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()
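# Example invocation (illustrative; assumes the OpenStack credentials are
# already sourced in the environment):
#   python run_rally-cert.py nova --smoke --report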

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()


HOME = os.environ['HOME'] + "/"
RALLY_DIR = ft_utils.FUNCTEST_REPO + '/' + \
            ft_utils.get_functest_config('general.directories.dir_rally')
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
TEMP_DIR = RALLY_DIR + "var"
BLACKLIST_FILE = RALLY_DIR + "blacklist.txt"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = \
    ft_utils.get_functest_config('general.directories.dir_rally_res')
TEMPEST_CONF_FILE = \
    ft_utils.get_functest_config('general.directories.dir_results') + \
    '/tempest/tempest.conf'
TEST_DB = ft_utils.get_functest_config('results.test_db_url')

PRIVATE_NET_NAME = ft_utils.get_functest_config('rally.network_name')
PRIVATE_SUBNET_NAME = ft_utils.get_functest_config('rally.subnet_name')
PRIVATE_SUBNET_CIDR = ft_utils.get_functest_config('rally.subnet_cidr')
ROUTER_NAME = ft_utils.get_functest_config('rally.router_name')

GLANCE_IMAGE_NAME = \
    ft_utils.get_functest_config('general.openstack.image_name')
GLANCE_IMAGE_FILENAME = \
    ft_utils.get_functest_config('general.openstack.image_file_name')
GLANCE_IMAGE_FORMAT = \
    ft_utils.get_functest_config('general.openstack.image_disk_format')
GLANCE_IMAGE_PATH = \
    ft_utils.get_functest_config('general.directories.dir_functest_data') + \
    "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the raw output of a rally command.
    :param cmd_raw: raw output of 'rally task start'
    :return: the task id as a string, or None if not found
    """
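    # Rally announces a new task with a banner such as
    #   "Task <uuid>: started"
    # The regex below captures the <uuid> part.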
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a rally task.
    :param json_raw: rally task results as a JSON string
    :return: True if no scenario reported an error, False otherwise
    """
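    # Expected (simplified) shape of the rally results, as consumed below:
    #   [{"result": [{"error": [], ...}, ...], ...}, ...]
    # A missing 'result' key or a non-empty 'error' list fails the task.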
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
    """Read tempest.conf and report whether live migration is enabled."""
    config = iniparse.ConfigParser()
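    # The relevant tempest.conf fragment looks like (illustrative values):
    #   [compute-feature-enabled]
    #   live_migration = true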
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
    """Build the task-args dict handed to rally via --task-args."""
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

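    # Strip the port and path from the Keystone URL, e.g.
    # "http://<host>:5000/v2.0" -> "http://<host>".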
    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
    """Stream rally's stdout, filter it for display and build the summary."""
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
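            # rally's summary table ends with a "total" row; the success
            # percentage (e.g. "100.0%") sits in the eighth '|'-separated
            # column (index 8 after split; column layout assumed from
            # rally's default table output).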
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
    """Return the complete stdout of the given subprocess."""
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def apply_blacklist(case_file_name, result_file_name):
    """Copy the scenario file, dropping the blacklisted test blocks."""
    logger.debug("Applying blacklist...")
    cases_file = open(case_file_name, 'r')
    result_file = open(result_file_name, 'w')
    black_tests = []

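    # The blacklist file is a YAML list; each entry names the scenarios and
    # installers it applies to, plus the tests to skip (illustrative shape):
    #   - scenarios:
    #       - <deploy scenario name>
    #     installers:
    #       - <installer name>
    #     tests:
    #       - <rally test name>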
    try:
        installer_type = os.getenv('INSTALLER_TYPE')
        deploy_scenario = os.getenv('DEPLOY_SCENARIO')
        if installer_type and deploy_scenario:
            # if INSTALLER_TYPE and DEPLOY_SCENARIO are set we read the file
            with open(BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            for item in black_list_yaml:
                scenarios = item['scenarios']
                installers = item['installers']
                if (deploy_scenario in scenarios and
                        installer_type in installers):
                    tests = item['tests']
                    black_tests.extend(tests)
    except Exception:
        black_tests = []
        logger.debug("Blacklisting not applied.")

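    # Copy the scenario file while skipping blacklisted blocks: a match turns
    # 'include' off until the next blank line. Note the for/else: the line is
    # written only when the inner loop finds no blacklist match.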
    include = True
    for cases_line in cases_file:
        if include:
            for black_tests_line in black_tests:
                if black_tests_line == cases_line.strip().rstrip(':'):
                    include = False
                    break
            else:
                result_file.write(str(cases_line))
        else:
            if cases_line.isspace():
                include = True

    cases_file.close()
    result_file.close()


def prepare_test_list(test_name):
    """Generate the filtered scenario file for the given test in TEMP_DIR."""
    scenario_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                                  test_name)
    if not os.path.exists(scenario_file_name):
        logger.error("The scenario '%s' does not exist." % scenario_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
    test_file_name = '{}opnfv-{}.yaml'.format(TEMP_DIR + "/", test_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)


def run_task(test_name):
    """
    The "main" function of the script: launch rally for one task.
    :param test_name: name of the rally test to run
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    prepare_test_list(test_name)

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
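    # The assembled command looks like (illustrative):
    #   rally task start --abort-on-sla-failure --task <RALLY_DIR>task.yaml \
    #       --task-args "{'service_list': ['nova'], ...}"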
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the results directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write the html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse the JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push the detailed results in the payload of the test case
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest",
                                    "Rally_details",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_data)


def main():
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # check the test name passed on the command line
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)
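    # network_dict holds the ids of the created network objects;
    # build_task_args() reads 'net_id' from it to pass to rally.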

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row of the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
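    # Overall success rate: unweighted mean of the per-scenario rates
    # (assumes at least one scenario ran, i.e. SUMMARY is not empty).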
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)
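    # check_success_rate() is expected to compare the rate with the criteria
    # configured for the test case and to return "PASS" or "FAIL" (the only
    # value tested below).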

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()