Merge "Add a new directory to hold pre-downloaded images"
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 from __future__ import division
12
13 import json
14 import logging
15 import os
16 import re
17 import subprocess
18 import time
19
20 import iniparse
21 import yaml
22
23 from functest.core import testcase
24 from functest.utils.constants import CONST
25 import functest.utils.openstack_utils as os_utils
26
27 logger = logging.getLogger(__name__)
28
29
class RallyBase(testcase.TestCase):
    """Base class for the OPNFV rally test cases.

    Holds the shared configuration (image, flavor, directories, load
    parameters) and the common workflow (prepare env, run tasks,
    generate report, clean up) used by RallySanity and RallyFull.
    """

    # Scenario groups selectable via test_name; 'all' expands to every
    # group except 'vm' (see _run_tests()).
    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
             'neutron', 'nova', 'quotas', 'requests', 'vm', 'all']
    # Glance image the scenarios boot from.
    GLANCE_IMAGE_NAME = CONST.openstack_image_name
    GLANCE_IMAGE_FILENAME = CONST.openstack_image_file_name
    GLANCE_IMAGE_PATH = os.path.join(CONST.dir_functest_images,
                                     GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.openstack_image_disk_format
    FLAVOR_NAME = "m1.tiny"

    # Layout of the rally directory inside the functest repo: scenario
    # yaml files plus the templates/support files referenced by them.
    RALLY_DIR = os.path.join(CONST.dir_repo_functest, CONST.dir_rally)
    RALLY_SCENARIO_DIR = os.path.join(RALLY_DIR, "scenario")
    TEMPLATE_DIR = os.path.join(RALLY_SCENARIO_DIR, "templates")
    SUPPORT_DIR = os.path.join(RALLY_SCENARIO_DIR, "support")
    # Load parameters forwarded to rally through --task-args.
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.dir_results, 'rally')
    # Read only to detect whether live migration is enabled.
    TEMPEST_CONF_FILE = os.path.join(CONST.dir_results,
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    # Scratch dir where blacklist-filtered scenario files are written.
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    # Names of the OpenStack resources created for a run.
    CINDER_VOLUME_TYPE_NAME = "volume_test"
    RALLY_PRIVATE_NET_NAME = CONST.rally_network_name
    RALLY_PRIVATE_SUBNET_NAME = CONST.rally_subnet_name
    RALLY_PRIVATE_SUBNET_CIDR = CONST.rally_subnet_cidr
    RALLY_ROUTER_NAME = CONST.rally_router_name
59
60     def __init__(self, **kwargs):
61         super(RallyBase, self).__init__(**kwargs)
62         self.mode = ''
63         self.summary = []
64         self.scenario_dir = ''
65         self.nova_client = os_utils.get_nova_client()
66         self.neutron_client = os_utils.get_neutron_client()
67         self.cinder_client = os_utils.get_cinder_client()
68         self.network_dict = {}
69         self.volume_type = None
70         self.smoke = None
71
72     def _build_task_args(self, test_file_name):
73         task_args = {'service_list': [test_file_name]}
74         task_args['image_name'] = self.GLANCE_IMAGE_NAME
75         task_args['flavor_name'] = self.FLAVOR_NAME
76         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
77         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
78         task_args['tmpl_dir'] = self.TEMPLATE_DIR
79         task_args['sup_dir'] = self.SUPPORT_DIR
80         task_args['users_amount'] = self.USERS_AMOUNT
81         task_args['tenants_amount'] = self.TENANTS_AMOUNT
82         task_args['use_existing_users'] = False
83         task_args['iterations'] = self.ITERATIONS_AMOUNT
84         task_args['concurrency'] = self.CONCURRENCY
85         task_args['smoke'] = self.smoke
86
87         ext_net = os_utils.get_external_net(self.neutron_client)
88         if ext_net:
89             task_args['floating_network'] = str(ext_net)
90         else:
91             task_args['floating_network'] = ''
92
93         net_id = self.network_dict['net_id']
94         if net_id:
95             task_args['netid'] = str(net_id)
96         else:
97             task_args['netid'] = ''
98
99         # get keystone auth endpoint
100         task_args['request_url'] = CONST.OS_AUTH_URL or ''
101
102         return task_args
103
104     def _prepare_test_list(self, test_name):
105         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
106         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
107                                           test_yaml_file_name)
108
109         if not os.path.exists(scenario_file_name):
110             scenario_file_name = os.path.join(self.scenario_dir,
111                                               test_yaml_file_name)
112
113             if not os.path.exists(scenario_file_name):
114                 raise Exception("The scenario '%s' does not exist."
115                                 % scenario_file_name)
116
117         logger.debug('Scenario fetched from : {}'.format(scenario_file_name))
118         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
119
120         if not os.path.exists(self.TEMP_DIR):
121             os.makedirs(self.TEMP_DIR)
122
123         self.apply_blacklist(scenario_file_name, test_file_name)
124         return test_file_name
125
126     @staticmethod
127     def get_task_id(cmd_raw):
128         """
129         get task id from command rally result
130         :param cmd_raw:
131         :return: task_id as string
132         """
133         taskid_re = re.compile('^Task +(.*): started$')
134         for line in cmd_raw.splitlines(True):
135             line = line.strip()
136             match = taskid_re.match(line)
137             if match:
138                 return match.group(1)
139         return None
140
141     @staticmethod
142     def task_succeed(json_raw):
143         """
144         Parse JSON from rally JSON results
145         :param json_raw:
146         :return: Bool
147         """
148         rally_report = json.loads(json_raw)
149         for report in rally_report:
150             if report is None or report.get('result') is None:
151                 return False
152
153             for result in report.get('result'):
154                 if result is None or len(result.get('error')) > 0:
155                     return False
156
157         return True
158
159     @staticmethod
160     def live_migration_supported():
161         config = iniparse.ConfigParser()
162         if (config.read(RallyBase.TEMPEST_CONF_FILE) and
163                 config.has_section('compute-feature-enabled') and
164                 config.has_option('compute-feature-enabled',
165                                   'live_migration')):
166             return config.getboolean('compute-feature-enabled',
167                                      'live_migration')
168
169         return False
170
171     @staticmethod
172     def get_cmd_output(proc):
173         result = ""
174         while proc.poll() is None:
175             line = proc.stdout.readline()
176             result += line
177         return result
178
179     @staticmethod
180     def excl_scenario():
181         black_tests = []
182         try:
183             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
184                 black_list_yaml = yaml.safe_load(black_list_file)
185
186             installer_type = CONST.INSTALLER_TYPE
187             deploy_scenario = CONST.DEPLOY_SCENARIO
188             if (bool(installer_type) * bool(deploy_scenario)):
189                 if 'scenario' in black_list_yaml.keys():
190                     for item in black_list_yaml['scenario']:
191                         scenarios = item['scenarios']
192                         installers = item['installers']
193                         if (deploy_scenario in scenarios and
194                                 installer_type in installers):
195                             tests = item['tests']
196                             black_tests.extend(tests)
197         except Exception:
198             logger.debug("Scenario exclusion not applied.")
199
200         return black_tests
201
202     @staticmethod
203     def excl_func():
204         black_tests = []
205         func_list = []
206
207         try:
208             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
209                 black_list_yaml = yaml.safe_load(black_list_file)
210
211             if not RallyBase.live_migration_supported():
212                 func_list.append("no_live_migration")
213
214             if 'functionality' in black_list_yaml.keys():
215                 for item in black_list_yaml['functionality']:
216                     functions = item['functions']
217                     for func in func_list:
218                         if func in functions:
219                             tests = item['tests']
220                             black_tests.extend(tests)
221         except Exception:
222             logger.debug("Functionality exclusion not applied.")
223
224         return black_tests
225
226     @staticmethod
227     def apply_blacklist(case_file_name, result_file_name):
228         logger.debug("Applying blacklist...")
229         cases_file = open(case_file_name, 'r')
230         result_file = open(result_file_name, 'w')
231
232         black_tests = list(set(RallyBase.excl_func() +
233                            RallyBase.excl_scenario()))
234
235         include = True
236         for cases_line in cases_file:
237             if include:
238                 for black_tests_line in black_tests:
239                     if re.search(black_tests_line,
240                                  cases_line.strip().rstrip(':')):
241                         include = False
242                         break
243                 else:
244                     result_file.write(str(cases_line))
245             else:
246                 if cases_line.isspace():
247                     include = True
248
249         cases_file.close()
250         result_file.close()
251
252     @staticmethod
253     def file_is_empty(file_name):
254         try:
255             if os.stat(file_name).st_size > 0:
256                 return False
257         except:
258             pass
259
260         return True
261
262     def _run_task(self, test_name):
263         logger.info('Starting test scenario "{}" ...'.format(test_name))
264
265         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
266         if not os.path.exists(task_file):
267             logger.error("Task file '%s' does not exist." % task_file)
268             raise Exception("Task file '%s' does not exist." % task_file)
269
270         file_name = self._prepare_test_list(test_name)
271         if self.file_is_empty(file_name):
272             logger.info('No tests for scenario "{}"'.format(test_name))
273             return
274
275         cmd_line = ("rally task start --abort-on-sla-failure "
276                     "--task {0} "
277                     "--task-args \"{1}\""
278                     .format(task_file, self._build_task_args(test_name)))
279         logger.debug('running command line: {}'.format(cmd_line))
280
281         p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
282                              stderr=subprocess.STDOUT, shell=True)
283         output = self._get_output(p, test_name)
284         task_id = self.get_task_id(output)
285         logger.debug('task_id : {}'.format(task_id))
286
287         if task_id is None:
288             logger.error('Failed to retrieve task_id, validating task...')
289             cmd_line = ("rally task validate "
290                         "--task {0} "
291                         "--task-args \"{1}\""
292                         .format(task_file, self._build_task_args(test_name)))
293             logger.debug('running command line: {}'.format(cmd_line))
294             p = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
295                                  stderr=subprocess.STDOUT, shell=True)
296             output = self.get_cmd_output(p)
297             logger.error("Task validation result:" + "\n" + output)
298             return
299
300         # check for result directory and create it otherwise
301         if not os.path.exists(self.RESULTS_DIR):
302             logger.debug('{} does not exist, we create it.'
303                          .format(self.RESULTS_DIR))
304             os.makedirs(self.RESULTS_DIR)
305
306         # write html report file
307         report_html_name = 'opnfv-{}.html'.format(test_name)
308         report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
309         cmd_line = "rally task report {} --out {}".format(task_id,
310                                                           report_html_dir)
311
312         logger.debug('running command line: {}'.format(cmd_line))
313         os.popen(cmd_line)
314
315         # get and save rally operation JSON result
316         cmd_line = "rally task results %s" % task_id
317         logger.debug('running command line: {}'.format(cmd_line))
318         cmd = os.popen(cmd_line)
319         json_results = cmd.read()
320         report_json_name = 'opnfv-{}.json'.format(test_name)
321         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
322         with open(report_json_dir, 'w') as f:
323             logger.debug('saving json file')
324             f.write(json_results)
325
326         """ parse JSON operation result """
327         if self.task_succeed(json_results):
328             logger.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
329         else:
330             logger.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
331
    def _get_output(self, proc, test_name):
        """Stream rally stdout, filter it for logging and collect summary
        statistics (test count, success %, total duration) into
        self.summary.

        :param proc: subprocess.Popen whose stdout carries rally output
        :param test_name: scenario name recorded in the summary entry
        :return: the filtered output as a single string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        # NOTE(review): lines still buffered when poll() first returns
        # non-None may be dropped by this loop — confirm against rally
        # output volume before relying on completeness.
        while proc.poll() is None:
            line = proc.stdout.readline()
            # Keep only the interesting rally output (tables, durations,
            # start/finish markers) for the returned log text.
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            # A table row that is neither a header, per-iteration line,
            # continuation nor the total line counts as one executed test.
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # Column 8 of the '| total |' row is assumed to hold the
                # success percentage — depends on rally's table layout;
                # TODO confirm across rally versions.
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    logger.info('Percentage error: %s, %s' %
                                (percentage, line))
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    logger.info('Duration error: %s, %s' % (duration, line))

        # Past this point the duration is kept as a formatted string.
        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            # Average of the per-table success percentages.
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        logger.debug("\n" + result)

        return result
392
393     def _prepare_env(self):
394         logger.debug('Validating the test name...')
395         if not (self.test_name in self.TESTS):
396             raise Exception("Test name '%s' is invalid" % self.test_name)
397
398         volume_types = os_utils.list_volume_types(self.cinder_client,
399                                                   private=False)
400         if volume_types:
401             logger.debug("Using existing volume type(s)...")
402         else:
403             logger.debug('Creating volume type...')
404             self.volume_type = os_utils.create_volume_type(
405                 self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
406             if self.volume_type is None:
407                 raise Exception("Failed to create volume type '%s'" %
408                                 self.CINDER_VOLUME_TYPE_NAME)
409             logger.debug("Volume type '%s' is created succesfully." %
410                          self.CINDER_VOLUME_TYPE_NAME)
411
412         logger.debug('Getting or creating image...')
413         self.image_exists, self.image_id = os_utils.get_or_create_image(
414             self.GLANCE_IMAGE_NAME,
415             self.GLANCE_IMAGE_PATH,
416             self.GLANCE_IMAGE_FORMAT)
417         if self.image_id is None:
418             raise Exception("Failed to get or create image '%s'" %
419                             self.GLANCE_IMAGE_NAME)
420
421         logger.debug("Creating network '%s'..." % self.RALLY_PRIVATE_NET_NAME)
422         self.network_dict = os_utils.create_shared_network_full(
423             self.RALLY_PRIVATE_NET_NAME,
424             self.RALLY_PRIVATE_SUBNET_NAME,
425             self.RALLY_ROUTER_NAME,
426             self.RALLY_PRIVATE_SUBNET_CIDR)
427         if self.network_dict is None:
428             raise Exception("Failed to create shared network '%s'" %
429                             self.RALLY_PRIVATE_NET_NAME)
430
431     def _run_tests(self):
432         if self.test_name == 'all':
433             for test in self.TESTS:
434                 if (test == 'all' or test == 'vm'):
435                     continue
436                 self._run_task(test)
437         else:
438             self._run_task(self.test_name)
439
    def _generate_report(self):
        """Render the summary table from self.summary, set self.result
        (average success percentage) and self.details (payload for
        result publication).
        """
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for s in self.summary:
            # Fixed-width columns so the ASCII table stays aligned.
            name = "{0:<17}".format(s['test_name'])
            duration = float(s['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(s['nb_tests'])
            total_nb_tests += int(s['nb_tests'])
            success = "{0:<10}".format(str(s['success']) + '%')
            total_success += float(s['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': s['overall_duration'],
                                        'nb tests': s['nb_tests'],
                                        'success': s['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # Average success over all scenarios; an empty run counts as 100%.
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        logger.info("\n" + report)
        # NOTE(review): 'nb success' carries the success *percentage*
        # string, not a count — key name looks misleading; confirm what
        # downstream consumers expect before renaming.
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        logger.info("Rally '%s' success_rate is %s%%"
                    % (self.case_name, success_rate))
509
    def _clean_up(self):
        """Delete the resources _prepare_env() created (volume type and,
        when it did not pre-exist, the glance image). The shared network
        is not removed here.
        """
        # Only set when _prepare_env() actually created the type.
        if self.volume_type:
            logger.debug("Deleting volume type '%s'..." % self.volume_type)
            os_utils.delete_volume_type(self.cinder_client, self.volume_type)

        # Keep images that existed before the run.
        if not self.image_exists:
            logger.debug("Deleting image '%s' with ID '%s'..."
                         % (self.GLANCE_IMAGE_NAME, self.image_id))
            # NOTE(review): nova_client is passed to a glance-image delete
            # helper — confirm os_utils.delete_glance_image really expects
            # a nova client here.
            if not os_utils.delete_glance_image(self.nova_client,
                                                self.image_id):
                logger.error("Error deleting the glance image")
521
522     def run(self):
523         self.start_time = time.time()
524         try:
525             self._prepare_env()
526             self._run_tests()
527             self._generate_report()
528             self._clean_up()
529             res = testcase.TestCase.EX_OK
530         except Exception as e:
531             logger.error('Error with run: %s' % e)
532             res = testcase.TestCase.EX_RUN_ERROR
533
534         self.stop_time = time.time()
535         return res
536
537
class RallySanity(RallyBase):
    """Quick smoke-mode rally run over the 'sanity' scenario set."""

    def __init__(self, **kwargs):
        # Keep any caller-supplied case_name; default to rally_sanity.
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
547
548
class RallyFull(RallyBase):
    """Full (non-smoke) rally run over the 'full' scenario set."""

    def __init__(self, **kwargs):
        # Keep any caller-supplied case_name; default to rally_full.
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')