Remove external server references on Rally scenarios
testcases/OpenStack/rally/run_rally-cert.py
#!/usr/bin/env python
#
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com
# morgan.richomme@orange.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# 0.1 (05/2015) initial commit
# 0.2 (28/09/2015) extract Tempest, format json result, add ceilometer suite
# 0.3 (19/10/2015) remove Tempest from run_rally
#     and push result into test DB
#
import argparse
import iniparse
import json
import os
import re
import subprocess
import time
import yaml

from novaclient import client as novaclient
from glanceclient import client as glanceclient
from keystoneclient.v2_0 import client as keystoneclient
from neutronclient.v2_0 import client as neutronclient
from cinderclient import client as cinderclient

import functest.utils.functest_logger as ft_logger
import functest.utils.functest_utils as functest_utils
import functest.utils.openstack_utils as openstack_utils

35 """ tests configuration """
36 tests = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
37          'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
38 parser = argparse.ArgumentParser()
39 parser.add_argument("test_name",
40                     help="Module name to be tested. "
41                          "Possible values are : "
42                          "[ {d[0]} | {d[1]} | {d[2]} | {d[3]} | {d[4]} | "
43                          "{d[5]} | {d[6]} | {d[7]} | {d[8]} | {d[9]} | "
44                          "{d[10]} ] "
45                          "The 'all' value "
46                          "performs all possible test scenarios"
47                          .format(d=tests))

parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
                    help="Create json result file",
                    action="store_true")
parser.add_argument("-s", "--smoke",
                    help="Smoke test mode",
                    action="store_true")
parser.add_argument("-v", "--verbose",
                    help="Print verbose info about the progress",
                    action="store_true")
parser.add_argument("-n", "--noclean",
                    help="Don't clean the created resources for this test.",
                    action="store_true")
parser.add_argument("-z", "--sanity",
                    help="Sanity test mode, execute only a subset of tests",
                    action="store_true")
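
# Example invocation (assuming the functest environment is configured):
#   python run_rally-cert.py --smoke --report nova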

args = parser.parse_args()

client_dict = {}
network_dict = {}

if args.verbose:
    RALLY_STDERR = subprocess.STDOUT
else:
    RALLY_STDERR = open(os.devnull, 'w')

76 """ logging configuration """
77 logger = ft_logger.Logger("run_rally").getLogger()
78
79 REPO_PATH = os.environ['repos_dir'] + '/functest/'
80 if not os.path.exists(REPO_PATH):
81     logger.error("Functest repository directory not found '%s'" % REPO_PATH)
82     exit(-1)
83

with open(os.environ["CONFIG_FUNCTEST_YAML"]) as f:
    functest_yaml = yaml.safe_load(f)

HOME = os.environ['HOME'] + "/"
RALLY_DIR = REPO_PATH + functest_yaml.get("general").get(
    "directories").get("dir_rally")
TEMPLATE_DIR = RALLY_DIR + "scenario/templates"
SUPPORT_DIR = RALLY_DIR + "scenario/support"

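# Default load profile injected into the Rally task files through
# build_task_args() below.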
FLAVOR_NAME = "m1.tiny"
USERS_AMOUNT = 2
TENANTS_AMOUNT = 3
ITERATIONS_AMOUNT = 10
CONCURRENCY = 4

RESULTS_DIR = functest_yaml.get("general").get("directories").get(
    "dir_rally_res")
TEMPEST_CONF_FILE = functest_yaml.get("general").get("directories").get(
    "dir_results") + '/tempest/tempest.conf'
TEST_DB = functest_yaml.get("results").get("test_db_url")

PRIVATE_NET_NAME = functest_yaml.get("rally").get("network_name")
PRIVATE_SUBNET_NAME = functest_yaml.get("rally").get("subnet_name")
PRIVATE_SUBNET_CIDR = functest_yaml.get("rally").get("subnet_cidr")
ROUTER_NAME = functest_yaml.get("rally").get("router_name")

GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get(
    "image_name")
GLANCE_IMAGE_FILENAME = functest_yaml.get("general").get("openstack").get(
    "image_file_name")
GLANCE_IMAGE_FORMAT = functest_yaml.get("general").get("openstack").get(
    "image_disk_format")
GLANCE_IMAGE_PATH = functest_yaml.get("general").get("directories").get(
    "dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME

CINDER_VOLUME_TYPE_NAME = "volume_test"


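# Per-scenario summary records, appended by get_output() and rendered as
# the final report in main().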
SUMMARY = []


def get_task_id(cmd_raw):
    """
    Get the task id from the 'rally task start' command output
    :param cmd_raw: raw command output
    :return: task_id as string
    """
    taskid_re = re.compile('^Task +(.*): started$')
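    # matches e.g. "Task 2fa4e2d9: started" and captures the bare task id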
    for line in cmd_raw.splitlines(True):
        line = line.strip()
        match = taskid_re.match(line)
        if match:
            return match.group(1)
    return None


def task_succeed(json_raw):
    """
    Parse the JSON results of a Rally task and check for errors
    :param json_raw: raw JSON results
    :return: True if no scenario reported an error, False otherwise
    """
    rally_report = json.loads(json_raw)
    for report in rally_report:
        if report is None or report.get('result') is None:
            return False

        for result in report.get('result'):
            if result is None or len(result.get('error')) > 0:
                return False

    return True


def live_migration_supported():
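    """
    Check in the Tempest configuration whether the deployment
    advertises live migration support.
    """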
    config = iniparse.ConfigParser()
    if (config.read(TEMPEST_CONF_FILE) and
            config.has_section('compute-feature-enabled') and
            config.has_option('compute-feature-enabled', 'live_migration')):
        return config.getboolean('compute-feature-enabled', 'live_migration')

    return False


def build_task_args(test_file_name):
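    """
    Build the dict of arguments passed to 'rally task start' via
    --task-args.
    """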
    task_args = {'service_list': [test_file_name]}
    task_args['image_name'] = GLANCE_IMAGE_NAME
    task_args['flavor_name'] = FLAVOR_NAME
    task_args['glance_image_location'] = GLANCE_IMAGE_PATH
    task_args['tmpl_dir'] = TEMPLATE_DIR
    task_args['sup_dir'] = SUPPORT_DIR
    task_args['users_amount'] = USERS_AMOUNT
    task_args['tenants_amount'] = TENANTS_AMOUNT
    task_args['iterations'] = ITERATIONS_AMOUNT
    task_args['concurrency'] = CONCURRENCY

    if args.sanity:
        task_args['full_mode'] = False
        task_args['smoke'] = True
    else:
        task_args['full_mode'] = True
        task_args['smoke'] = args.smoke

    ext_net = openstack_utils.get_external_net(client_dict['neutron'])
    if ext_net:
        task_args['floating_network'] = str(ext_net)
    else:
        task_args['floating_network'] = ''

    net_id = network_dict['net_id']
    task_args['netid'] = str(net_id)
    task_args['live_migration'] = live_migration_supported()

    auth_url = os.getenv('OS_AUTH_URL')
    if auth_url is not None:
        task_args['request_url'] = auth_url.rsplit(":", 1)[0]
    else:
        task_args['request_url'] = ''

    return task_args


def get_output(proc, test_name):
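    """
    Stream the output of a running Rally task, filter what gets logged
    and append a per-scenario entry to the global SUMMARY.
    """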
    global SUMMARY
    result = ""
    nb_tests = 0
    overall_duration = 0.0
    success = 0.0
    nb_totals = 0

    while proc.poll() is None:
        line = proc.stdout.readline()
        if args.verbose:
            result += line
        else:
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

        # parse output for summary report
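        # Rally prints a per-action results table; the 8th pipe-separated
        # field of its "total" row holds the success rate (e.g. "100.0%").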
        if ("| " in line and
                "| action" not in line and
                "| Starting" not in line and
                "| Completed" not in line and
                "| ITER" not in line and
                "|   " not in line and
                "| total" not in line):
            nb_tests += 1
        elif "| total" in line:
            percentage = ((line.split('|')[8]).strip(' ')).strip('%')
            try:
                success += float(percentage)
            except ValueError:
                logger.info('Percentage error: %s, %s' % (percentage, line))
            nb_totals += 1
        elif "Full duration" in line:
            duration = line.split(': ')[1]
            try:
                overall_duration += float(duration)
            except ValueError:
                logger.info('Duration error: %s, %s' % (duration, line))

    overall_duration = "{:10.2f}".format(overall_duration)
    if nb_totals == 0:
        success_avg = 0
    else:
        success_avg = "{:0.2f}".format(success / nb_totals)

    scenario_summary = {'test_name': test_name,
                        'overall_duration': overall_duration,
                        'nb_tests': nb_tests,
                        'success': success_avg}
    SUMMARY.append(scenario_summary)

    logger.info("\n" + result)

    return result


def get_cmd_output(proc):
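    """Return the complete stdout of a subprocess once it terminates."""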
    result = ""

    while proc.poll() is None:
        line = proc.stdout.readline()
        result += line

    return result


def run_task(test_name):
    """
    Main function of the script: launches Rally for the given task
    :param test_name: name of the Rally test scenario
    :return: void
    """
    global SUMMARY
    logger.info('Starting test scenario "{}" ...'.format(test_name))
    start_time = time.time()
    stop_time = start_time

    task_file = '{}task.yaml'.format(RALLY_DIR)
    if not os.path.exists(task_file):
        logger.error("Task file '%s' does not exist." % task_file)
        exit(-1)

    test_file_name = '{}opnfv-{}.yaml'.format(RALLY_DIR + "scenario/",
                                              test_name)
    if not os.path.exists(test_file_name):
        logger.error("The scenario '%s' does not exist." % test_file_name)
        exit(-1)

    logger.debug('Scenario fetched from : {}'.format(test_file_name))

    cmd_line = ("rally task start --abort-on-sla-failure " +
                "--task {} ".format(task_file) +
                "--task-args \"{}\" ".format(build_task_args(test_name)))
    logger.debug('running command line : {}'.format(cmd_line))

    p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                         stderr=RALLY_STDERR, shell=True)
    output = get_output(p, test_name)
    task_id = get_task_id(output)
    logger.debug('task_id : {}'.format(task_id))

    if task_id is None:
        logger.error('Failed to retrieve task_id, validating task...')
        cmd_line = ("rally task validate " +
                    "--task {} ".format(task_file) +
                    "--task-args \"{}\" ".format(build_task_args(test_name)))
        logger.debug('running command line : {}'.format(cmd_line))
        p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, shell=True)
        output = get_cmd_output(p)
        logger.error("Task validation result:" + "\n" + output)
        return

    # check for the result directory and create it if needed
    if not os.path.exists(RESULTS_DIR):
        logger.debug('{} does not exist, we create it.'.format(RESULTS_DIR))
        os.makedirs(RESULTS_DIR)

    # write html report file
    report_file_name = '{}opnfv-{}.html'.format(RESULTS_DIR, test_name)
    cmd_line = "rally task report {} --out {}".format(task_id,
                                                      report_file_name)

    logger.debug('running command line : {}'.format(cmd_line))
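    # Note: os.popen() does not wait for completion; the HTML report is
    # generated in the background.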
    os.popen(cmd_line)

    # get and save rally operation JSON result
    cmd_line = "rally task results %s" % task_id
    logger.debug('running command line : {}'.format(cmd_line))
    cmd = os.popen(cmd_line)
    json_results = cmd.read()
    with open('{}opnfv-{}.json'.format(RESULTS_DIR, test_name), 'w') as f:
        logger.debug('saving json file')
        f.write(json_results)

    # parse the results we just saved for the detailed DB payload
    json_data = json.loads(json_results)

    """ parse JSON operation result """
    status = "failed"
    if task_succeed(json_results):
        logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        status = "passed"
    else:
        logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    # Push results into the payload of the testcase
    if args.report:
        stop_time = time.time()
        logger.debug("Push Rally detailed results into DB")
        functest_utils.push_results_to_db("functest",
                                          "Rally_details",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_data)


def main():
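    """
    Prepare the OpenStack resources (volume type, image, network), run
    the selected Rally scenario(s) and build the summary report.
    """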
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created successfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created successfully."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S",
                                       time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # keep the average success rate as a float so it can be compared
    # against the success criteria below; format it only for display
    total_success = total_success / len(SUMMARY)
    total_success_str = "{0:<10}".format(
        "{:0.2f}".format(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if total_success >= 90:
        status = "passed"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          "Rally",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error deleting the volume type...")


if __name__ == '__main__':
    main()