#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
# and push result into test DB
#
import argparse
import iniparse
import json
import os
import re
import requests
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils

36 """ tests configuration """
37 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
38          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
39 parser = argparse.ArgumentParser()
40 parser.add_argument("test_name",
41                     help="Module name to be tested. "
42                          "Possible values are : "
43                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
44                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
45                          "{d[10]} ] "
46                          "The 'all' value "
47                          "performs all possible test scenarios"
48                          .format(d=tests))
49
parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")

args = parser.parse_args()
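
# Typical invocations (illustrative; all flags are defined above):
#   run_rally-cert.py nova                  # run only the Nova scenario suite
#   run_rally-cert.py all --smoke --report  # smoke mode, push results to DB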

client_dict = {}
network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

""" logging configuration """
logger = ft_logger.Logger("run_rally").getLogger()

REPO_PATH = os.environ['repos_dir'] + '/functest/'
if not os.path.exists(REPO_PATH):
    logger.error("Functest repository directory not found '%s'" % REPO_PATH)
    exit(-1)


with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


SUMMARY = []


def push_results_to_db(case, payload, criteria):

    url = TEST_DB + "/results"
    installer = functest_utils.get_installer_type(logger)
    scenario = functest_utils.get_scenario(logger)
    version = functest_utils.get_version(logger)
    pod_name = functest_utils.get_pod_name(logger)

    # the success criteria is evaluated by the caller and passed in as-is
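    # For reference, the POSTed document has this shape (illustrative
    # values; "details" carries the payload passed by the caller):
    #   {"project_name": "functest", "case_name": "Rally", "pod_name": "...",
    #    "installer": "...", "version": "...", "scenario": "...",
    #    "criteria": "passed", "details": {...}}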

    params = {"project_name": "functest", "case_name": case,
              "pod_name": pod_name, "installer": installer,
              "version": version, "scenario": scenario,
              "criteria": criteria, "details": payload}

    headers = {'Content-Type': 'application/json'}
    r = requests.post(url, data=json.dumps(params), headers=headers)
    logger.debug(r)


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' output
    :param cmd_raw: raw output of the rally command
    :return: task_id as string
    """
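    # The regex below matches the line Rally prints when a task starts,
    # e.g. (illustrative UUID):
    #   Task 6f63d9ec-eecd-4bc5-b3f2-e9c1d8b02073: started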
    taskid_re = re.compile('^Task +(.*): started$')
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the Rally JSON results and check that no scenario failed
    :param json_raw: raw JSON output of 'rally task results'
    :return: Bool
    """
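    # Expected input shape (illustrative): a list of scenario reports,
    #   [{"result": [{"error": [], ...}, ...], ...}]
    # A missing 'result' key or any non-empty 'error' list fails the task.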
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
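    # Returns True only if tempest.conf contains a section like
    # (illustrative snippet):
    #   [compute-feature-enabled]
    #   live_migration = True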
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
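    # These key/value pairs are handed to 'rally task start --task-args'
    # below; they are presumably the variables referenced by the task YAML
    # templates (e.g. image_name, flavor_name).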
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    return task_args


def get_output(proc, test_name):
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0
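    # Rally prints one result table per scenario; the parsing below keys off
    # a "total" row of the form (illustrative values; field 8 after
    # splitting on '|' is the success percentage):
    #   | total | 0.9 | 1.0 | 1.2 | 1.3 | 1.5 | 1.1 | 100.0% | 10 |
    # and takes the overall run time from a line like "Full duration: 42.12".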

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def get_cmd_output(proc):
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    """
    The "main" function of the script: it launches Rally for a given task
    :param test_name: name of the rally test
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it otherwise
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    with open('{}opnfv-{}.json'
              .format(RESULTS_DIR, test_name)) as json_file:
        json_data = json.load(json_file)

    """ parse JSON operation result """
    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results in payload of testcase
    if args.report:
        logger.debug("Push result into DB")
        push_results_to_db("Rally_details", json_data, status)


def main():
    global SUMMARY
    global network_dict
    # configure script
    if args.test_name not in tests:
        logger.error("The argument '%s' is not valid." % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep total_success numeric: it is compared to the success criteria below
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format("{:0.2f}".format(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be at least 90%
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        push_results_to_db("Rally", payload, status)

    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting volume type...")


if __name__ == '__main__':
    main()