a91059a2b19c1a04737b0e0071277e70582ed8a4
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21
22 import pkg_resources
23 import prettytable
24 from xtesting.core import testcase
25 from xtesting.energy import energy
26 import yaml
27
28 from functest.core import singlevm
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils import config
31 from functest.utils import env
32
33 LOGGER = logging.getLogger(__name__)
34
35
class RallyBase(singlevm.VmReady2):
    """Base class for Rally testcases implementation."""

    # pylint: disable=too-many-instance-attributes
    TESTS = ['authenticate', 'glance', 'cinder', 'gnocchi', 'heat',
             'keystone', 'neutron', 'nova', 'quotas']

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(getattr(config.CONF, 'dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    visibility = 'public'
    shared_network = True

    def __init__(self, **kwargs):
        """Initialize RallyBase object.

        Grant an admin-like role (detected as either 'admin' or 'Admin')
        to the test project user so Rally can manage per-task resources,
        then reset all per-run bookkeeping attributes.

        :raises Exception: if no admin-like role can be detected.
        """
        super(RallyBase, self).__init__(**kwargs)
        assert self.orig_cloud
        assert self.project
        # Role name casing differs between deployments; probe both.
        if self.orig_cloud.get_role("admin"):
            role_name = "admin"
        elif self.orig_cloud.get_role("Admin"):
            role_name = "Admin"
        else:
            raise Exception("Cannot detect neither admin nor Admin")
        self.orig_cloud.grant_role(
            role_name, user=self.project.user.id,
            project=self.project.project.id,
            domain=self.project.domain.id)
        self.creators = []
        self.summary = []          # per-scenario stats filled by _append_summary
        self.scenario_dir = ''
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
        self.compute_cnt = 0
        self.flavor_alt = None
        self.tests = []
        self.task_file = ''
        self.run_cmd = ''

    def _build_task_args(self, test_file_name):
        """Build the argument dict passed to the Rally task via --task-args.

        :param test_file_name: name of the service under test
        :return: dict of task arguments
        """
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = str(self.image.name)
        task_args['flavor_name'] = str(self.flavor.name)
        task_args['flavor_alt_name'] = str(self.flavor_alt.name)
        task_args['glance_image_location'] = str(self.filename)
        task_args['glance_image_format'] = str(self.image_format)
        task_args['tmpl_dir'] = str(self.TEMPLATE_DIR)
        task_args['sup_dir'] = str(self.SUPPORT_DIR)
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        # Fall back to empty strings so the task file always renders.
        if self.ext_net:
            task_args['floating_network'] = str(self.ext_net.name)
        else:
            task_args['floating_network'] = ''

        if self.network:
            task_args['netid'] = str(self.network.id)
        else:
            task_args['netid'] = ''

        return task_args

    def _prepare_test_list(self, test_name):
        """Build the blacklist-filtered scenario file for a test.

        Looks up 'opnfv-<test_name>.yaml' first in the common scenario
        dir, then in the testcase-specific one, applies the blacklist and
        writes the result under TEMP_DIR.

        :param test_name: name of the test (e.g. 'nova')
        :return: path of the generated scenario file
        :raises Exception: if no scenario file exists for the test
        """
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self._apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get task id from rally command output.

        :param cmd_raw: 'rally task start' output (str or bytes)
        :return: task_id as string, or None if not found
        """
        # subprocess pipes yield bytes under Python 3; decode so the str
        # regex below can match.
        if isinstance(cmd_raw, bytes):
            cmd_raw = cmd_raw.decode("utf-8", errors="ignore")
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results.

        A run succeeds only when every task finished and passed its SLA.

        :param json_raw: rally report as a JSON string
        :return: Bool
        """
        rally_report = json.loads(json_raw)
        tasks = rally_report.get('tasks')
        if tasks:
            for task in tasks:
                if task.get('status') != 'finished' or \
                   task.get('pass_sla') is not True:
                    return False
        else:
            return False
        return True

    def _migration_supported(self):
        """Determine if migration is supported (needs > 1 compute node)."""
        return self.compute_cnt > 1

    @staticmethod
    def excl_scenario():
        """Return tests blacklisted for the current deploy scenario.

        Best-effort: any failure (missing/invalid blacklist file) is
        logged and yields an empty list rather than aborting the run.
        """
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            deploy_scenario = env.get('DEPLOY_SCENARIO')
            if (bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    in_it = RallyBase.in_iterable_re
                    if in_it(deploy_scenario, scenarios):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False

    def excl_func(self):
        """Return tests blacklisted for unsupported functionalities.

        Currently only 'no_migration' is detected (single compute node).
        Best-effort: failures are logged and yield an empty list.
        """
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not self._migration_supported():
                func_list.append("no_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

    def _apply_blacklist(self, case_file_name, result_file_name):
        """Copy the scenario file, dropping blacklisted test sections.

        A section starts on a line matching a blacklisted pattern and
        ends at the next blank line.

        :param case_file_name: source scenario file
        :param result_file_name: filtered scenario file to write
        """
        LOGGER.debug("Applying blacklist...")
        black_tests = list(set(self.excl_func() +
                               self.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", str(black_tests))

        # 'with' guarantees both handles are closed even on error.
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            include = True
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        # no blacklist pattern matched: keep the line
                        result_file.write(str(cases_line))
                else:
                    # a blank line terminates the excluded section
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty (or missing/unreadable)."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except OSError:
            # treat a missing or unreadable file as empty
            pass

        return True

    def _save_results(self, test_name, task_id):
        """Generate and save task execution results (log, JSON and HTML)."""
        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # put detailed result to log
        cmd = (["rally", "task", "detailed", "--uuid", task_id])
        LOGGER.debug('running command: %s', cmd)
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        LOGGER.info("%s\n%s", " ".join(cmd), output)

        # save report as JSON
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        cmd = (["rally", "task", "report", "--json", "--uuid", task_id,
                "--out", report_json_dir])
        LOGGER.debug('running command: %s', cmd)
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        LOGGER.info("%s\n%s", " ".join(cmd), output)

        # save report as HTML
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd = (["rally", "task", "report", "--html", "--uuid", task_id,
                "--out", report_html_dir])
        LOGGER.debug('running command: %s', cmd)
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        LOGGER.info("%s\n%s", " ".join(cmd), output)

        # 'with' closes the handle that a bare open().read() would leak
        with open(report_json_dir) as json_file:
            json_results = json_file.read()
        self._append_summary(json_results, test_name)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.', test_name)

    def run_task(self, test_name):
        """Run a Rally task and save its results.

        :param test_name: name of the test scenario
        :raises Exception: if the task id cannot be retrieved
        """
        LOGGER.info('Starting test scenario "%s" ...', test_name)
        LOGGER.debug('running command: %s', self.run_cmd)
        proc = subprocess.Popen(self.run_cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = proc.communicate()[0]

        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)
        if task_id is None:
            LOGGER.error("Failed to retrieve task_id")
            LOGGER.error("Result:\n%s", output)
            raise Exception("Failed to retrieve task id")
        self._save_results(test_name, task_id)

    def _append_summary(self, json_raw, test_name):
        """Update statistics summary info from a rally JSON report.

        :param json_raw: rally report as a JSON string
        :param test_name: name of the test scenario
        """
        nb_tests = 0
        nb_success = 0
        overall_duration = 0.0

        rally_report = json.loads(json_raw)
        for task in rally_report.get('tasks'):
            for subtask in task.get('subtasks'):
                for workload in subtask.get('workloads'):
                    if workload.get('full_duration'):
                        overall_duration += workload.get('full_duration')

                    if workload.get('data'):
                        nb_tests += len(workload.get('data'))

                    for result in workload.get('data'):
                        if not result.get('error'):
                            nb_success += 1

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'nb_success': nb_success,
                            'task_status': self.task_succeed(json_raw)}
        self.summary.append(scenario_summary)

    def prepare_run(self):
        """Prepare resources needed by test scenarios.

        :raises Exception: on invalid test name or missing task file
        """
        assert self.cloud
        LOGGER.debug('Validating the test name...')
        if self.test_name == 'all':
            self.tests = self.TESTS
        elif self.test_name in self.TESTS:
            self.tests = [self.test_name]
        else:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        self.task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(self.task_file):
            LOGGER.error("Task file '%s' does not exist.", self.task_file)
            raise Exception("Task file '{}' does not exist.".
                            format(self.task_file))

        self.compute_cnt = len(self.cloud.list_hypervisors())
        self.flavor_alt = self.create_flavor_alt()
        LOGGER.debug("flavor: %s", self.flavor_alt)

    def prepare_task(self, test_name):
        """Prepare resources for one test run.

        :param test_name: name of the test scenario
        :return: True if the test has scenarios to run, False otherwise
        """
        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return False
        self.run_cmd = (["rally", "task", "start", "--abort-on-sla-failure",
                         "--task", self.task_file, "--task-args",
                         str(self._build_task_args(test_name))])
        return True

    def run_tests(self):
        """Execute all prepared tests."""
        for test in self.tests:
            if self.prepare_task(test):
                self.run_task(test)

    def _generate_report(self):
        """Generate test execution summary report (table, log and details)."""
        total_duration = 0.0
        total_nb_tests = 0
        total_nb_success = 0
        nb_modules = 0
        payload = []

        res_table = prettytable.PrettyTable(
            padding_width=2,
            field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])
        res_table.align['Module'] = "l"
        res_table.align['Duration'] = "r"
        res_table.align['Success'] = "r"

        # for each scenario we draw a row for the table
        for item in self.summary:
            if item['task_status'] is True:
                nb_modules += 1
            total_duration += item['overall_duration']
            total_nb_tests += item['nb_tests']
            total_nb_success += item['nb_success']
            try:
                success_avg = 100 * item['nb_success'] / item['nb_tests']
            except ZeroDivisionError:
                success_avg = 0
            success_str = str("{:0.2f}".format(success_avg)) + '%'
            duration_str = time.strftime("%M:%S",
                                         time.gmtime(item['overall_duration']))
            res_table.add_row([item['test_name'], duration_str,
                               item['nb_tests'], success_str])
            payload.append({'module': item['test_name'],
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': success_str}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        try:
            self.result = 100 * total_nb_success / total_nb_tests
        except ZeroDivisionError:
            # no test run at all counts as success (nothing failed)
            self.result = 100
        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = str(success_rate) + '%'
        res_table.add_row(["", "", "", ""])
        res_table.add_row(["TOTAL:", total_duration_str, total_nb_tests,
                           success_rate_str])

        LOGGER.info("Rally Summary Report:\n\n%s\n", res_table.get_string())
        LOGGER.info("Rally '%s' success_rate is %s%% in %s/%s modules",
                    self.case_name, success_rate, nb_modules,
                    len(self.summary))
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})
        self.details = payload

    def clean(self):
        """Cleanup of OpenStack resources. Should be called on completion."""
        if self.flavor_alt:
            self.orig_cloud.delete_flavor(self.flavor_alt.id)
        super(RallyBase, self).clean()

    def is_successful(self):
        """The overall result of the test.

        Any scenario whose task failed makes the whole testcase fail.
        """
        for item in self.summary:
            if item['task_status'] is False:
                return testcase.TestCase.EX_TESTCASE_FAILED

        return super(RallyBase, self).is_successful()

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase.

        :return: testcase.TestCase.EX_OK or EX_RUN_ERROR
        """
        self.start_time = time.time()
        try:
            assert super(RallyBase, self).run(
                **kwargs) == testcase.TestCase.EX_OK
            environ = dict(
                os.environ,
                OS_USERNAME=self.project.user.name,
                OS_PROJECT_NAME=self.project.project.name,
                OS_PROJECT_ID=self.project.project.id,
                OS_PASSWORD=self.project.password)
            conf_utils.create_rally_deployment(environ=environ)
            self.prepare_run()
            self.run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            self.result = 0
            res = testcase.TestCase.EX_RUN_ERROR
        self.stop_time = time.time()
        return res
509
510
class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity: smoke mode over all sanity scenarios."""
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.smoke = True
        self.test_name = 'all'
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
522
523
class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull: non-smoke mode over all full scenarios."""
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.smoke = False
        self.test_name = 'all'
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')