refactor: create shared network through a common helper to eliminate duplication
[functest.git] / testcases / OpenStack / rally / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
""" tests configuration """

import argparse
import json
import os
import re
import subprocess
import time

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as os_utils
import iniparse
import yaml


tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

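# populated by main() with the id of the shared network used by the scenarios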
network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


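# module-level state shared with main(): per-scenario summary entries and the
# neutron client used when building task arguments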
SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the Rally command output
    :param cmd_raw: raw output of the 'rally task start' command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a Rally task and check for errors
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
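    """
    Read the Tempest configuration to check whether the deployment
    advertises live migration support
    :return: True if live_migration is enabled in tempest.conf
    """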
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
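    """
    Build the dictionary passed to Rally as --task-args
    :param test_file_name: name of the module under test
    :return: dict of task arguments
    """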
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
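    """
    Consume the Rally process output line by line, keep the relevant
    lines for logging and compute a per-scenario summary (number of
    tests, success rate, duration) appended to the global SUMMARY list
    :param proc: running 'rally task start' subprocess
    :param test_name: name of the scenario being run
    :return: filtered output as string
    """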
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
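    """
    Read the full output of a subprocess until it terminates
    :param proc: running subprocess
    :return: output as string
    """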
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    """
    Main function of the script: it launches Rally for a given task
    :param test_name: name of the Rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push the results into the payload of the testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
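    """
    Prepare the OpenStack resources (volume type, image, shared network),
    run the requested Rally scenario(s), print a summary report, optionally
    push the results to the test DB and clean up the created resources
    """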
    global SUMMARY
    global network_dict
    global neutron_client

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()

    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []

    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_exists, image_id = os_utils.get_or_create_image(GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH,
                                                          GLANCE_IMAGE_FORMAT)
    if not image_id:
        exit(-1)

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_shared_network_full(PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        exit(1)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    success_rate = "{:0.2f}".format(total_success / len(SUMMARY))
    success_rate_str = "{0:<10}".format(str(success_rate) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + success_rate_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate}})

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    # evaluation of the success criteria
    status = functest_utils.check_success_rate(case_name, success_rate)

    exit_code = -1
    if status == "PASS":
        exit_code = 0

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(exit_code)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")

    exit(exit_code)


if __name__ == '__main__':
    main()