Merge "Add kingbird endpoint information to functest."
[functest-xtesting.git] / testcases / OpenStack / rally / run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
16 """ tests configuration """
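
# Typical invocation, a sketch assuming the Functest environment is set up
# ('repos_dir' and 'CONFIG_FUNCTEST_YAML' exported, OpenStack credentials
# sourced), since both are read below:
#   python run_rally-cert.py --sanity --report authenticate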

import argparse
import json
import os
import re
import subprocess
import time
import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as os_utils
import iniparse
import yaml


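# Rally scenario modules that can be launched individually; 'all' runs
# every module except 'vm'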
tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
         'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
parser = argparse.ArgumentParser()
parser.add_argument("test_name",
                    help="Module name to be tested. "
                         "Possible values are: "
                         "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
                         "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
                         "{d[10]} ] "
                         "The 'all' value "
                         "performs all possible test scenarios"
                         .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()

network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

# logging configuration
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

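# Default parameters injected into the Rally task templates via
# build_task_args()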
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []
neutron_client = None


def get_task_id(cmd_raw):
    """
    Get the task id from the raw output of a Rally command.
    :param cmd_raw: raw output of 'rally task start'
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON result of a Rally task.
    :param json_raw: raw JSON output of 'rally task results'
    :return: True if no iteration reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
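    """Check in tempest.conf whether live migration is enabled."""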
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_name):
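    """Build the --task-args dict passed to Rally for one scenario."""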
    task_args = {'service_list': [test_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

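    # sanity mode runs only the smoke subset of each scenario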
    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = os_utils.get_external_net(neutron_client)
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
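    """
    Stream the Rally process output: filter the lines worth displaying
    and accumulate per-scenario statistics into the global SUMMARY.
    """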
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
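    """Collect and return the whole stdout of a subprocess until it exits."""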
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    """
    Core function of the script: launch Rally for one task.
    :param test_name: name of the Rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

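    # --abort-on-sla-failure stops the task as soon as an SLA criterion fails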
    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # create the result directory if it does not exist yet
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save the rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # keep the parsed JSON in memory for the detailed DB push below
    json_data = json.loads(json_results)

    # parse JSON operation result
    status = "FAIL"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "PASS"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results into the testcase payload
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
    global SUMMARY
    global network_dict
    global neutron_client

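    # OpenStack API clients shared by the setup and cleanup steps below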
    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()
    cinder_client = os_utils.get_cinder_client()

    start_time = time.time()
    stop_time = start_time

    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name: '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []

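    # Make sure at least one public volume type exists; create one otherwise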
    volume_types = os_utils.list_volume_types(cinder_client,
                                              private=False)
    if not volume_types:
        volume_type = os_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

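    # Reuse the Glance image if it is already registered, upload it otherwise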
    image_id = os_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = os_utils.create_glance_image(glance_client,
                                                GLANCE_IMAGE_NAME,
                                                GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = os_utils.create_network_full(neutron_client,
                                                PRIVATE_NET_NAME,
                                                PRIVATE_SUBNET_NAME,
                                                ROUTER_NAME,
                                                PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not os_utils.update_neutron_net(neutron_client,
                                           network_dict['net_id'],
                                           shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

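    # Build the ASCII summary table, one row per scenario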
    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average as a number so the success criteria check below
    # compares floats, not a formatted string
    total_success_avg = total_success / len(SUMMARY) if SUMMARY else 0.0
    total_success_str = "{0:<10}".format(
        "{:0.2f}".format(total_success_avg) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success_avg}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "FAIL"
    # for Rally we decided that the overall success rate must be above 90%
    if total_success_avg >= 90:
        status = "PASS"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          None,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)

    if args.noclean:
        exit(0)

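    # Cleanup: delete only the resources created by this run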
    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not os_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not os_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()