[Promise test case] Use proper way to get parameters from yaml
[functest.git] / testcases / OpenStack / rally / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
""" tests configuration """

import json
import os
import re
import subprocess
import time

import argparse
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as os_utils
import iniparse
from functest.utils.functest_utils import FUNCTEST_REPO as REPO_PATH

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ]. "
                         "The 'all' value "
                         "performs all possible test scenarios."
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()
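
# Example invocations (illustrative only; in a real deployment the script
# is launched by the functest framework with the OpenStack credentials
# already sourced):
#   python run_rally-cert.py nova --smoke --report
#   python run_rally-cert.py all --sanity --noclean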

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()


functest_yaml = functest_utils.get_functest_yaml()

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + '/' + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' output.
    :param cmd_raw: raw output of the rally command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results produced by rally and report overall success.
    :param json_raw: raw JSON string
    :return: True when every scenario iteration completed without error
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
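    """
    Report whether the deployment supports live migration, based on the
    [compute-feature-enabled]/live_migration option of the tempest.conf
    generated earlier in the functest run (False when the file, section
    or option is missing).
    """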
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
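    """
    Build the dictionary passed to rally through --task-args: image and
    flavor names, template/support directories, tenant/user/iteration
    settings, plus the network and auth endpoint discovered at runtime.
    :param test_file_name: name of the module under test (e.g. 'nova')
    :return: task_args as dict
    """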
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
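    """
    Stream the rally stdout line by line, keep the relevant lines for
    logging, and accumulate the per-scenario statistics (test count,
    success rate, duration) into the global SUMMARY list.
    :param proc: running rally subprocess
    :param test_name: scenario name used in the summary entry
    :return: the (possibly filtered) output as a string
    """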
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
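        # The checks below depend on the layout of rally's summary table;
        # an illustrative (made-up) 'total' row looks like:
        #   | total | 0.5 | 0.6 | 0.7 | 0.8 | 0.9 | 0.65 | 100.0% | 10 |
        # where the 8th '|'-separated field (split index 8) holds the
        # success percentage.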
223         if ("| " in line and
224                 "| action" not in line and
225                 "| Starting" not in line and
226                 "| Completed" not in line and
227                 "| ITER" not in line and
228                 "|   " not in line and
229                 "| total" not in line):
230             nb_tests += 1
231         elif "| total" in line:
232             percentage = ((line.split('|')[8]).strip(' ')).strip('%')
233             try:
234                 success += float(percentage)
235             except ValueError:
236                 logger.info('Percentage error: %s, %s' % (percentage, line))
237             nb_totals += 1
238         elif "Full duration" in line:
239             duration = line.split(': ')[1]
240             try:
241                 overall_duration += float(duration)
242             except ValueError:
243                 logger.info('Duration error: %s, %s' % (duration, line))
244
245     overall_duration = "{:10.2f}".format(overall_duration)
246     if nb_totals == 0:
247         success_avg = 0
248     else:
249         success_avg = "{:0.2f}".format(success / nb_totals)
250
251     scenario_summary = {'test_name': test_name,
252                         'overall_duration': overall_duration,
253                         'nb_tests': nb_tests,
254                         'success': success_avg}
255     SUMMARY.append(scenario_summary)
256
257     logger.debug("\n" + result)
258
259     return result
260
261
262 def get_cmd_output(proc):
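    """
    Read the subprocess stdout until it terminates and return it unchanged.
    :param proc: running subprocess
    :return: the complete output as a string
    """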
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    #
    # the "main" function of the script: it launches rally for one task
    # :param test_name: name of the rally test
    # :return: void
    #
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))
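    # For illustration only (paths and arguments depend on the deployment),
    # the resulting command has the shape:
    #   rally task start --abort-on-sla-failure --task <RALLY_DIR>task.yaml \
    #       --task-args "{'service_list': ['nova'], 'image_name': ..., ...}"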

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
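    """
    Prepare the OpenStack resources required by the scenarios (volume
    type, glance image, shared network), run the selected rally task(s),
    print the summary report and optionally push the results to the
    test DB, then clean up unless --noclean is set.
    """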
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if args.test_name not in tests:
        logger.error('Argument not valid.')
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
    status = functest_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()