#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
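#
# Example usage (assumes the functest environment is set up, i.e.
# 'repos_dir' and 'CONFIG_FUNCTEST_YAML' are exported along with the
# OpenStack credentials):
#   python run_rally-cert.py --smoke --report all
#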
import argparse
import iniparse
import json
import os
import re
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils

35 """ tests configuration """
36 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
37          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
38 parser = argparse.ArgumentParser()
39 parser.add_argument("test_name",
40                     help="Module name to be tested. "
41                          "Possible values are : "
42                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
43                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
44                          "{d[10]} ] "
45                          "The 'all' value "
46                          "performs all possible test scenarios"
47                          .format(d=tests))
48
49 parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
50 parser.add_argument("-r", "--report",
51                     help="Create json result file",
52                     action="store_true")
53 parser.add_argument("-s", "--smoke",
54                     help="Smoke test mode",
55                     action="store_true")
56 parser.add_argument("-v", "--verbose",
57                     help="Print verbose info about the progress",
58                     action="store_true")
59 parser.add_argument("-n", "--noclean",
60                     help="Don't clean the created resources for this test.",
61                     action="store_true")
62 parser.add_argument("-z", "--sanity",
63                     help="Sanity test mode, execute only a subset of tests",
64                     action="store_true")
65
args = parser.parse_args()

client_dict = {}
network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"
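# NOTE: 'dir_rally' from the functest YAML is assumed to end with a
# trailing '/', since the paths above and below are built by plain
# string concatenation.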

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' output.
    :param cmd_raw: raw output of the Rally command
    :return: task_id as a string, or None if not found
    """
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a Rally task.
    :param json_raw: raw output of 'rally task results'
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
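    """
    Check in the Tempest configuration file (if present) whether
    compute-feature-enabled/live_migration is enabled; default to False.
    """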
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
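    """
    Build the dict of scenario arguments that is serialized and passed
    to 'rally task start' via --task-args.
    """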
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['use_existing_users'] = False
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
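    """
    Stream the Rally process stdout line by line: filter which lines are
    echoed to the log, and parse the result tables to build a per-scenario
    summary (number of tests, success rate, duration) appended to SUMMARY.
    """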
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.debug("\n" + result)

    return result


def get_cmd_output(proc):
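    """Return the full stdout of a running subprocess as a string."""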
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    #
    # The 'main' function of the script: it launches Rally for the given
    # test scenario.
    # :param test_name: name of the Rally test scenario
    # :return: void
    #
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from: {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line: {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id: {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line: {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it if it does not exist
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line: {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line: {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # parse the JSON string directly instead of re-reading the file
    # that was just written
    json_data = json.loads(json_results)

357     """ parse JSON operation result """
358     status = "failed"
359     if task_succeed(json_results):
360         logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
361         status = "passed"
362     else:
363         logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
364
365     # Push results in payload of testcase
366     if args.report:
367         stop_time = time.time()
368         logger.debug("Push Rally detailed results into DB")
369         functest_utils.push_results_to_db("functest",
370                                           "Rally_details",
371                                           logger,
372                                           start_time,
373                                           stop_time,
374                                           status,
375                                           json_data)
376
377
def main():
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
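    # Instantiate the OpenStack clients needed to prepare the test
    # prerequisites (image, private network, volume type)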
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average success rate as a float so that the comparison
    # against the success criteria below stays numeric
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format("{:0.2f}%".format(total_success))
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be at least 90%
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          "Rally",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

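    # Cleanup: delete only the resources created by this run; an image or
    # volume type that existed beforehand is left untouched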
    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")


if __name__ == '__main__':
    main()