Extracted all global parameters into functest_constants.py
[functest.git] / functest / opnfv_tests / openstack / rally / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
16 """ tests configuration """

import argparse
import json
import os
import re
import subprocess
import time

import iniparse
import yaml

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as ft_utils
import functest.utils.openstack_utils as os_utils
import functest.utils.functest_constants as ft_constants

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value runs all the scenarios "
                         "above except 'vm'."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

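# Illustrative invocations (hypothetical paths; flags as defined above):
#   python run_rally-cert.py all --report
#   python run_rally-cert.py nova --sanity --noclean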

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally-cert").getLogger()

RALLY_DIR = os.path.join(ft_constants.FUNCTEST_REPO_DIR,
                         ft_constants.RALLY_RELATIVE_PATH)
RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
TEMP_DIR = os.path.join(RALLY_DIR, "var")
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = os.path.join(ft_constants.FUNCTEST_RESULTS_DIR, 'rally')
TEMPEST_CONF_FILE = os.path.join(ft_constants.FUNCTEST_RESULTS_DIR,
                                 'tempest/tempest.conf')

RALLY_PRIVATE_NET_NAME = ft_constants.RALLY_PRIVATE_NET_NAME
RALLY_PRIVATE_SUBNET_NAME = ft_constants.RALLY_PRIVATE_SUBNET_NAME
RALLY_PRIVATE_SUBNET_CIDR = ft_constants.RALLY_PRIVATE_SUBNET_CIDR
RALLY_ROUTER_NAME = ft_constants.RALLY_ROUTER_NAME

GLANCE_IMAGE_NAME = ft_constants.GLANCE_IMAGE_NAME
GLANCE_IMAGE_FILENAME = ft_constants.GLANCE_IMAGE_FILENAME
GLANCE_IMAGE_FORMAT = ft_constants.GLANCE_IMAGE_FORMAT
GLANCE_IMAGE_PATH = os.path.join(ft_constants.FUNCTEST_DATA_DIR,
                                 GLANCE_IMAGE_FILENAME)
CINDER_VOLUME_TYPE_NAME = "volume_test"


class GlobalVariables:
    """Shared state populated in main() and read by the helpers below."""
    SUMMARY = []
    nova_client = None
    neutron_client = None
    network_dict = {}


def get_task_id(cmd_raw):
    """
    Extract the task id from the 'rally task start' console output.
    :param cmd_raw: raw command output
    :return: task id as a string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None
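# A sketch of the output line get_task_id() expects, derived from the
# regex above (the id below is made up):
#   "Task  6a6b76fa-0c1a-4b0e-8ab0-bc2bb8a3c001: started"
#   -> returns "6a6b76fa-0c1a-4b0e-8ab0-bc2bb8a3c001"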


def task_succeed(json_raw):
    """
    Check the rally JSON results for errors.
    :param json_raw: rally results as a JSON string
    :return: True if no iteration reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True
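# The JSON shape this check assumes, reconstructed from the accesses
# above (values abridged): a list of scenario reports, each with a
# 'result' list whose entries must carry an empty 'error' list, e.g.:
#   [{"result": [{"error": [], "duration": 1.2}, ...]}, ...]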


def live_migration_supported():
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False
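# For reference, the tempest.conf stanza inspected above:
#   [compute-feature-enabled]
#   live_migration = True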


def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['glance_image_format'] = GLANCE_IMAGE_FORMAT
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['smoke'] = True
    else:
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = GlobalVariables.network_dict['net_id']
    task_args['netid'] = str(net_id)

    auth_url = ft_constants.OS_AUTH_URL
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args
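# The returned dict is rendered via str() and passed to
# 'rally task start --task-args' in run_task() below, where rally is
# expected to treat it as a mapping of template arguments for the
# scenario files.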


def get_output(proc, test_name):
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    GlobalVariables.SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result
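# Sketch of the '| total' summary row parsed above (column layout
# assumed from rally's tabular output); field index 8 of line.split('|')
# holds the success percentage:
#   | total | 0.8 | 1.0 | 1.3 | 1.5 | 1.6 | 1.2 | 100.0% | 10 |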


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def excl_scenario():
    black_tests = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        installer_type = ft_constants.CI_INSTALLER_TYPE
        deploy_scenario = ft_constants.CI_SCENARIO
        if installer_type and deploy_scenario:
            if 'scenario' in black_list_yaml:
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    if (deploy_scenario in scenarios and
                            installer_type in installers):
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        logger.debug("Scenario exclusion not applied.")

    return black_tests
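# Expected 'scenario' layout in blacklist.txt, reconstructed from the
# keys read above (the scenario, installer, and test names below are
# illustrative):
#   scenario:
#     - scenarios:
#         - os-odl_l2-nofeature-ha
#       installers:
#         - fuel
#       tests:
#         - NovaServers.boot_server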


def excl_func():
    black_tests = []
    func_list = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        if not live_migration_supported():
            func_list.append("no_live_migration")

        if 'functionality' in black_list_yaml:
            for item in black_list_yaml['functionality']:
                functions = item['functions']
                for func in func_list:
                    if func in functions:
                        tests = item['tests']
                        black_tests.extend(tests)
    except Exception:
        logger.debug("Functionality exclusion not applied.")

    return black_tests
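# Matching 'functionality' layout (sketch; 'no_live_migration' is the
# only function name this script emits, the test name is illustrative):
#   functionality:
#     - functions:
#         - no_live_migration
#       tests:
#         - NovaServers.boot_and_live_migrate_server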


def apply_blacklist(case_file_name, result_file_name):
    logger.debug("Applying blacklist...")
    black_tests = list(set(excl_func() + excl_scenario()))

    include = True
    with open(case_file_name, 'r') as cases_file, \
            open(result_file_name, 'w') as result_file:
        for cases_line in cases_file:
            if include:
                for black_tests_line in black_tests:
                    if re.search(black_tests_line,
                                 cases_line.strip().rstrip(':')):
                        include = False
                        break
                else:
                    result_file.write(str(cases_line))
            else:
                if cases_line.isspace():
                    include = True
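# Note: blacklist entries act as regular expressions matched against
# each case heading; once one matches, lines are dropped until the next
# blank line, i.e. the whole YAML block of that scenario is skipped.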


def prepare_test_list(test_name):
    test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
    scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)

    if not os.path.exists(scenario_file_name):
        if args.sanity:
            scenario_file_name = os.path.join(SANITY_MODE_DIR,
                                              test_yaml_file_name)
        else:
            scenario_file_name = os.path.join(FULL_MODE_DIR,
                                              test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            logger.info("The scenario '%s' does not exist."
                        % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from: {}'.format(scenario_file_name))
    test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)
    return test_file_name


def file_is_empty(file_name):
    try:
        if os.stat(file_name).st_size > 0:
            return False
    except OSError:
        pass

    return True


def run_task(test_name):
    """
    Launch a rally task for the given test scenario (the script's "main"
    worker function).
    :param test_name: name of the rally test
    :return: void
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = os.path.join(RALLY_DIR, 'task.yaml')
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    file_name = prepare_test_list(test_name)
    if file_is_empty(file_name):
        logger.info('No tests for scenario "{}"'.format(test_name))
        return

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # create the result directory if it does not exist
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_html_name = 'opnfv-{}.html'.format(test_name)
    report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_html_dir)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    report_json_name = 'opnfv-{}.json'.format(test_name)
    report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
    with open(report_json_dir, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open(report_json_dir) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push results into the testcase payload
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest",
                                    "Rally_details",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_data)



def main():

    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # check the requested test name
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    GlobalVariables.SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # draw a table row for each scenario
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    if len(GlobalVariables.SUMMARY):
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # evaluate the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()