# Merge "Reverted the file permission"
# [functest-xtesting.git] / functest/opnfv_tests/openstack/rally/run_rally-cert.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 import argparse
12 import json
13 import os
14 import re
15 import subprocess
16 import time
17
18 import iniparse
19 import yaml
20
21 from functest.utils.constants import CONST
22 import functest.utils.functest_logger as ft_logger
23 import functest.utils.functest_utils as ft_utils
24 import functest.utils.openstack_utils as os_utils
25
# Rally scenario modules this script knows how to run; 'all' expands to
# every module except 'vm' (see main()).
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

# Parsed at import time: this module is intended to run as a script only.
args = parser.parse_args()
57
58
# Show rally's stderr only in verbose mode; otherwise discard it.
RALLY_STDERR = subprocess.STDOUT if args.verbose else open(os.devnull, 'w')
63
""" logging configuration """
logger = ft_logger.Logger("run_rally-cert").getLogger()

# Layout of the rally scenario tree inside the functest repository.
RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
SANITY_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "sanity")
FULL_MODE_DIR = os.path.join(RALLY_SCENARIO_DIR, "full")
TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
TEMP_DIR = os.path.join(RALLY_DIR, "var")  # blacklist-filtered task files
BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")

# Default sizing of the rally task runs (consumed by build_task_args()).
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
# tempest.conf is probed for the live_migration feature flag.
TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
                                 'tempest/tempest.conf')

# Names of the private network resources created for the rally runs.
RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
RALLY_ROUTER_NAME = CONST.rally_router_name

# Glance image used by the scenarios, sourced from the functest data dir.
GLANCE_IMAGE_NAME = CONST.openstack_image_name
GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_data,
                                 GLANCE_IMAGE_FILENAME)
CINDER_VOLUME_TYPE_NAME = "volume_test"  # created when no public type exists
98
class GlobalVariables:
    """Mutable module-wide state shared between main() and the helpers."""
    SUMMARY = []            # per-scenario result dicts, filled by get_output()
    # Declared here for consistency: main() assigns nova_client on this class
    # but the original never declared it alongside the other attributes.
    nova_client = None
    neutron_client = None   # set in main(), read by build_task_args()
    network_dict = {}       # rally network info created in main()
103
104
def get_task_id(cmd_raw):
    """Extract the rally task id from `rally task start` console output.

    :param cmd_raw: raw stdout captured from the rally command
    :return: task_id as string, or None when no start line is found
    """
    started_pattern = re.compile('^Task +(.*): started$')
    for raw_line in cmd_raw.splitlines(True):
        found = started_pattern.match(raw_line.strip())
        if found:
            return found.group(1)
    return None
118
119
def task_succeed(json_raw):
    """Parse the JSON output of `rally task results` and report success.

    :param json_raw: raw JSON string produced by rally
    :return: True when every scenario result is present and error-free
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            # Truthiness check instead of len(...): a missing or None
            # 'error' field no longer raises TypeError (fix), and a
            # non-empty error list still fails the task as before.
            if result is None or result.get('error'):
                return False

    return True
136
137
def live_migration_supported():
    """Check tempest.conf for the live_migration compute feature flag.

    :return: True when tempest.conf exists and enables live migration
    """
    config = iniparse.ConfigParser()
    if not config.read(TEMPEST_CONF_FILE):
        return False
    if not config.has_section('compute-feature-enabled'):
        return False
    if not config.has_option('compute-feature-enabled', 'live_migration'):
        return False
    return config.getboolean('compute-feature-enabled', 'live_migration')
146
147
def build_task_args(test_file_name):
    """Assemble the --task-args dict passed to rally for one scenario.

    :param test_file_name: name of the scenario module to run
    :return: dict of template arguments for the rally task
    """
    task_args = {
        'service_list': [test_file_name],
        'image_name': GLANCE_IMAGE_NAME,
        'flavor_name': FLAVOR_NAME,
        'glance_image_location': GLANCE_IMAGE_PATH,
        'glance_image_format': GLANCE_IMAGE_FORMAT,
        'tmpl_dir': TEMPLATE_DIR,
        'sup_dir': SUPPORT_DIR,
        'users_amount': USERS_AMOUNT,
        'tenants_amount': TENANTS_AMOUNT,
        'use_existing_users': False,
        'iterations': ITERATIONS_AMOUNT,
        'concurrency': CONCURRENCY,
        # Sanity mode always runs the smoke subset.
        'smoke': True if args.sanity else args.smoke,
    }

    ext_net = os_utils.get_external_net(GlobalVariables.neutron_client)
    task_args['floating_network'] = str(ext_net) if ext_net else ''

    task_args['netid'] = str(GlobalVariables.network_dict['net_id'])

    # Derive the base request URL from the auth URL (strip the port part).
    auth_url = CONST.OS_AUTH_URL
    task_args['request_url'] = (auth_url.rsplit(":", 1)[0]
                                if auth_url is not None else '')

    return task_args
183
184
def get_output(proc, test_name):
    """Stream a rally subprocess' stdout, keep a filtered transcript of it
    and accumulate a per-scenario summary into GlobalVariables.SUMMARY.

    :param proc: Popen object whose stdout carries rally's console output
    :param test_name: scenario name recorded in the summary entry
    :return: the captured (possibly filtered) output as one string
    """
    result = ""
    nb_tests = 0            # number of scenario table data rows seen
    overall_duration = 0.0  # sum of all "Full duration" values (seconds)
    success = 0.0           # sum of the "total" rows' success percentages
    nb_totals = 0           # number of "| total" rows, for averaging

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            # Non-verbose: keep only progress markers and table lines.
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        # A table data row that is neither a header nor a per-action /
        # per-iteration detail row counts as one executed test.
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            # Column 8 of the "total" row holds the success percentage.
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        # Average percentage across all "total" rows, as a string.
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    GlobalVariables.SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result
247
248
def get_cmd_output(proc):
    """Drain and return a subprocess' stdout while it is running.

    :param proc: Popen object with stdout piped
    :return: everything read from stdout, as a single string
    """
    chunks = []
    while proc.poll() is None:
        chunks.append(proc.stdout.readline())
    return "".join(chunks)
257
258
def excl_scenario():
    """Build the blacklist entries matching the current installer and
    deploy scenario, from the 'scenario' section of blacklist.txt.

    :return: list of test name patterns to exclude (possibly empty)
    """
    black_tests = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        installer_type = CONST.INSTALLER_TYPE
        deploy_scenario = CONST.DEPLOY_SCENARIO
        # Both values must be set for the exclusion to be meaningful
        # (original used bool() multiplication for the same check).
        if installer_type and deploy_scenario:
            if 'scenario' in black_list_yaml.keys():
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    if (deploy_scenario in scenarios and
                            installer_type in installers):
                        # Renamed from 'tests' to avoid shadowing the
                        # module-level 'tests' list.
                        black_tests.extend(item['tests'])
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are no longer swallowed; a missing or malformed blacklist simply
        # means no scenario-based exclusion.
        logger.debug("Scenario exclusion not applied.")

    return black_tests
281
282
def excl_func():
    """Build the blacklist entries tied to unsupported functionalities
    (currently only live migration), from the 'functionality' section of
    blacklist.txt.

    :return: list of test name patterns to exclude (possibly empty)
    """
    black_tests = []
    func_list = []

    try:
        with open(BLACKLIST_FILE, 'r') as black_list_file:
            black_list_yaml = yaml.safe_load(black_list_file)

        if not live_migration_supported():
            func_list.append("no_live_migration")

        if 'functionality' in black_list_yaml.keys():
            for item in black_list_yaml['functionality']:
                functions = item['functions']
                for func in func_list:
                    if func in functions:
                        # Renamed from 'tests' to avoid shadowing the
                        # module-level 'tests' list.
                        black_tests.extend(item['tests'])
    except Exception:
        # Narrowed from a bare 'except:'; failing to read the blacklist
        # simply means no functionality-based exclusion.
        logger.debug("Functionality exclusion not applied.")

    return black_tests
305
306
def apply_blacklist(case_file_name, result_file_name):
    """Copy a scenario file, dropping every test block whose name matches
    a blacklist entry.

    A match suppresses lines until the next blank line, which ends the
    blacklisted block.

    :param case_file_name: path of the input scenario yaml file
    :param result_file_name: path of the filtered file to write
    """
    logger.debug("Applying blacklist...")
    black_tests = list(set(excl_func() + excl_scenario()))

    # Context managers guarantee both files are closed even on error
    # (the original leaked the handles if an exception was raised).
    with open(case_file_name, 'r') as cases_file, \
            open(result_file_name, 'w') as result_file:
        include = True
        for cases_line in cases_file:
            if include:
                for black_tests_line in black_tests:
                    if re.search(black_tests_line,
                                 cases_line.strip().rstrip(':')):
                        include = False
                        break
                else:
                    # for/else: written only when no pattern matched.
                    result_file.write(str(cases_line))
            else:
                if cases_line.isspace():
                    include = True
329
330
def prepare_test_list(test_name):
    """Locate the scenario file for a test, apply the blacklist and return
    the path of the resulting task file under TEMP_DIR.

    :param test_name: name of the rally test module
    :return: path of the blacklist-filtered scenario file
    """
    test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
    scenario_file_name = os.path.join(RALLY_SCENARIO_DIR, test_yaml_file_name)

    if not os.path.exists(scenario_file_name):
        # Fall back to the mode-specific directory (sanity or full).
        mode_dir = SANITY_MODE_DIR if args.sanity else FULL_MODE_DIR
        scenario_file_name = os.path.join(mode_dir, test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            logger.info("The scenario '%s' does not exist."
                        % scenario_file_name)
            exit(-1)

    logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
    test_file_name = os.path.join(TEMP_DIR, test_yaml_file_name)

    if not os.path.exists(TEMP_DIR):
        os.makedirs(TEMP_DIR)

    apply_blacklist(scenario_file_name, test_file_name)
    return test_file_name
356
357
def file_is_empty(file_name):
    """Tell whether a file is missing, unreadable or zero-sized.

    :param file_name: path to check
    :return: False when the file exists and has content, True otherwise
    """
    try:
        if os.stat(file_name).st_size > 0:
            return False
    except OSError:
        # Narrowed from a bare 'except:': only stat failures (missing
        # file, permission error) should fall through to "empty".
        pass

    return True
366
367
def run_task(test_name):
    """Launch rally for one scenario and post-process its results.

    Runs `rally task start`, parses the console output for the task id,
    writes the HTML and JSON reports under RESULTS_DIR and optionally
    pushes the detailed results to the test DB.

    :param test_name: name for the rally test
    :return: void
    """
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = os.path.join(RALLY_DIR, 'task.yaml')
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    file_name = prepare_test_list(test_name)
    if file_is_empty(file_name):
        # Everything in this scenario was blacklisted: nothing to run.
        logger.info('No tests for scenario "{}"'.format(test_name))
        return

    cmd_line = ("rally task start --abort-on-sla-failure "
                "--task {0} "
                "--task-args \"{1}\""
                .format(task_file, build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        # No task id in the output: re-run in validate mode to surface
        # the configuration error, then give up on this scenario.
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_html_name = 'opnfv-{}.html'.format(test_name)
    report_html_dir = os.path.join(RESULTS_DIR, report_html_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_html_dir)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    report_json_name = 'opnfv-{}.json'.format(test_name)
    report_json_dir = os.path.join(RESULTS_DIR, report_json_name)
    with open(report_json_dir, 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # Re-read the file we just wrote to get the parsed structure that is
    # pushed to the DB below.
    with open(report_json_dir) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        ft_utils.push_results_to_db("functest",
                                    "Rally_details",
                                    start_time,
                                    stop_time,
                                    status,
                                    json_data)
458
459
def main():
    """Entry point: prepare OpenStack resources, run the requested rally
    scenario(s), print a summary table and clean up.

    Exits with 0 when the success-rate criteria is met, non-zero otherwise.
    """
    GlobalVariables.nova_client = os_utils.get_nova_client()
    GlobalVariables.neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    GlobalVariables.SUMMARY = []

    # Rally's cinder scenarios need at least one public volume type; create
    # a temporary one when none exists (removed again at the end).
    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % RALLY_PRIVATE_NET_NAME)
    GlobalVariables.network_dict = \
        os_utils.create_shared_network_full(RALLY_PRIVATE_NET_NAME,
                                            RALLY_PRIVATE_SUBNET_NAME,
                                            RALLY_ROUTER_NAME,
                                            RALLY_PRIVATE_SUBNET_CIDR)
    if not GlobalVariables.network_dict:
        exit(1)

    if args.test_name == "all":
        # 'vm' is deliberately excluded from the 'all' run.
        for test_name in tests:
            if not (test_name == 'all' or
                    test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in GlobalVariables.SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)

    # Average success rate over the scenarios that ran; an empty summary
    # (everything blacklisted) counts as 100%.
    if len(GlobalVariables.SUMMARY):
        success_rate = total_success / len(GlobalVariables.SUMMARY)
    else:
        success_rate = 100
    success_rate = "{:0.2f}".format(success_rate)
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = ft_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        ft_utils.push_results_to_db("functest",
                                    case_name,
                                    start_time,
                                    stop_time,
                                    status,
                                    payload)
    if args.noclean:
        # Leave the image and volume type in place for debugging.
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(GlobalVariables.nova_client,
                                            image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)
610
611
# Script entry point.
if __name__ == '__main__':
    main()