functest-xtesting.git: testcases/OpenStack/rally/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
""" tests configuration """

import json
import os
import re
import subprocess
import time

import argparse
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as os_utils
import iniparse

tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are : "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


functest_yaml = functest_utils.get_functest_yaml()

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None

def get_task_id(cmd_raw):
    """
    Get the task id from the Rally command output
    :param cmd_raw: raw output of the 'rally task start' command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the Rally JSON results and check that no scenario reported an error
    :param json_raw: JSON output of 'rally task results'
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False

def build_task_args(test_file_name):
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

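    # Assumption: OS_AUTH_URL has the form <scheme>://<host>:<port>/...;
    # stripping the last ':'-separated part leaves the base URL, presumably
    # used as the target of the 'requests' scenario.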
    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args

def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
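        # Heuristics on Rally's ASCII results table: each '|' data row that
        # is neither a header, a per-iteration line nor the 'total' row
        # counts as one test; the 'total' row is expected to hold the success
        # percentage in field 8 of its '|' split, and the "Full duration"
        # line holds the scenario duration in seconds.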
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result

def run_task(test_name):
    #
    # The "main" function of the script: it launches Rally for the given task
    # :param test_name: name of the Rally test
    # :return: void
    #
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

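    # --abort-on-sla-failure stops a scenario as soon as one of its SLA
    # criteria is broken, so failing scenarios abort early instead of
    # running every remaining iteration.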
    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the results directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, creating it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push the detailed results as the payload of the test case
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)

def main():
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if args.test_name not in tests:
        logger.error('Invalid test name: %s' % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # Evaluation of the success criteria
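    # check_success_rate compares the averaged success rate with the pass
    # threshold configured for this test case (presumably in the functest
    # criteria configuration) and returns "PASS" or "FAIL".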
    status = functest_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()