Adapt functest testcase to API refactoring
testcases/OpenStack/rally/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
import argparse
import iniparse
import json
import os
import re
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils
35 """ tests configuration """
36 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
37          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
38 parser = argparse.ArgumentParser()
39 parser.add_argument("test_name",
40                     help="Module name to be tested. "
41                          "Possible values are : "
42                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
43                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
44                          "{d[10]} ] "
45                          "The 'all' value "
46                          "performs all possible test scenarios"
47                          .format(d=tests))
48
49 parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
50 parser.add_argument("-r", "--report",
51                     help="Create json result file",
52                     action="store_true")
53 parser.add_argument("-s", "--smoke",
54                     help="Smoke test mode",
55                     action="store_true")
56 parser.add_argument("-v", "--verbose",
57                     help="Print verbose info about the progress",
58                     action="store_true")
59 parser.add_argument("-n", "--noclean",
60                     help="Don't clean the created resources for this test.",
61                     action="store_true")
62 parser.add_argument("-z", "--sanity",
63                     help="Sanity test mode, execute only a subset of tests",
64                     action="store_true")
65
66 args = parser.parse_args()
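
# Example invocation (illustrative), assuming the environment expected
# below (CONFIG_FUNCTEST_YAML, repos_dir, OpenStack credentials) is set:
#   python run_rally-cert.py nova --smoke --report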

client_dict = {}
network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

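# static defaults injected into the Rally task templates through
# build_task_args() below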
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


# filled by get_output(): one summary dict per executed scenario
SUMMARY = []


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' console output,
    i.e. from a line such as "Task <id>: started"
    :param cmd_raw: console output of the command
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse JSON from rally JSON results
    :param json_raw: raw JSON output of 'rally task results'
    :return: Bool
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
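    """
    Check in the Tempest configuration file whether the 'live_migration'
    option of the 'compute-feature-enabled' section is set to true.
    :return: Bool (False if the file or the option cannot be read)
    """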
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
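    """
    Build the task arguments passed to 'rally task start' via --task-args.
    :param test_file_name: name of the module under test (e.g. 'nova')
    :return: dict of task arguments
    """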
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args


def get_output(proc, test_name):
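    """
    Consume the 'rally task start' console output line by line: filter
    what is displayed, count the tests, compute the success rate and the
    overall duration, and append a summary dict for this scenario to
    SUMMARY.
    :param proc: subprocess object running the rally task
    :param test_name: name of the rally test
    :return: the (possibly filtered) output as a string
    """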
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def get_cmd_output(proc):
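    """
    Read and return the whole stdout of a running subprocess.
    :param proc: subprocess object to read
    :return: output as a string
    """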
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    """
    The "main" function of the script: it launches rally for a task.
    :param test_name: name for the rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    # parse JSON operation result
    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # push results into the payload of the testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
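    """
    Prepare the OpenStack resources needed by the scenarios (volume type,
    Glance image, private network), run the selected test scenario(s),
    print the summary report and optionally push the results to the test
    DB, then clean up the created resources unless --noclean is set.
    """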
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if args.test_name not in tests:
        logger.error('Argument not valid: {}'.format(args.test_name))
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)
    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average as a float: it is re-used below to evaluate the
    # success criteria
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format("{:0.2f}%".format(total_success))
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria:
    # for Rally we decided that the overall success rate must be at least 90%
    status = "failed"
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          "Rally",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()