Merge "Refactor resource creation and cleanup in rally"
[functest-xtesting.git] / functest/opnfv_tests/openstack/rally/rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import iniparse
24 import pkg_resources
25 import yaml
26
27 from functest.core import testcase
28 from functest.energy import energy
29 from functest.opnfv_tests.openstack.snaps import snaps_utils
30 from functest.utils.constants import CONST
31
32 from snaps.openstack.create_image import ImageSettings
33 from snaps.openstack.create_network import NetworkSettings, SubnetSettings
34 from snaps.openstack.create_router import RouterSettings
35 from snaps.openstack.tests import openstack_tests
36 from snaps.openstack.utils import deploy_utils
37
38 LOGGER = logging.getLogger(__name__)
39
40
41 class RallyBase(testcase.TestCase):
42     """Base class form Rally testcases implementation."""
43
44     TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
45              'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
46     GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
47     GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
48     GLANCE_IMAGE_PATH = os.path.join(
49         CONST.__getattribute__('dir_functest_images'),
50         GLANCE_IMAGE_FILENAME)
51     GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
52     GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
53     GLANCE_IMAGE_EXTRA_PROPERTIES = {}
54     if hasattr(CONST, 'openstack_extra_properties'):
55         GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
56             'openstack_extra_properties')
57     FLAVOR_NAME = "m1.tiny"
58
59     RALLY_DIR = pkg_resources.resource_filename(
60         'functest', 'opnfv_tests/openstack/rally')
61     RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
62         'functest', 'opnfv_tests/openstack/rally/scenario')
63     TEMPLATE_DIR = pkg_resources.resource_filename(
64         'functest', 'opnfv_tests/openstack/rally/scenario/templates')
65     SUPPORT_DIR = pkg_resources.resource_filename(
66         'functest', 'opnfv_tests/openstack/rally/scenario/support')
67     USERS_AMOUNT = 2
68     TENANTS_AMOUNT = 3
69     ITERATIONS_AMOUNT = 10
70     CONCURRENCY = 4
71     RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
72     TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
73                                      'tempest/tempest.conf')
74     BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
75     TEMP_DIR = os.path.join(RALLY_DIR, "var")
76
77     RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
78     RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
79     RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
80     RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')
81
82     def __init__(self, **kwargs):
83         """Initialize RallyBase object."""
84         super(RallyBase, self).__init__(**kwargs)
85         if 'os_creds' in kwargs:
86             self.os_creds = kwargs['os_creds']
87         else:
88             creds_override = None
89             if hasattr(CONST, 'snaps_os_creds_override'):
90                 creds_override = CONST.__getattribute__(
91                     'snaps_os_creds_override')
92
93             self.os_creds = openstack_tests.get_credentials(
94                 os_env_file=CONST.__getattribute__('openstack_creds'),
95                 overrides=creds_override)
96
97         self.guid = ''
98         if CONST.__getattribute__('rally_unique_names'):
99             self.guid = '-' + str(uuid.uuid4())
100
101         self.creators = []
102         self.mode = ''
103         self.summary = []
104         self.scenario_dir = ''
105         self.ext_net_name = None
106         self.priv_net_id = None
107         self.smoke = None
108         self.test_name = None
109         self.start_time = None
110         self.result = None
111         self.details = None
112
113     def _build_task_args(self, test_file_name):
114         task_args = {'service_list': [test_file_name]}
115         task_args['image_name'] = self.GLANCE_IMAGE_NAME
116         task_args['flavor_name'] = self.FLAVOR_NAME
117         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
118         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
119         task_args['tmpl_dir'] = self.TEMPLATE_DIR
120         task_args['sup_dir'] = self.SUPPORT_DIR
121         task_args['users_amount'] = self.USERS_AMOUNT
122         task_args['tenants_amount'] = self.TENANTS_AMOUNT
123         task_args['use_existing_users'] = False
124         task_args['iterations'] = self.ITERATIONS_AMOUNT
125         task_args['concurrency'] = self.CONCURRENCY
126         task_args['smoke'] = self.smoke
127
128         ext_net = self.ext_net_name
129         if ext_net:
130             task_args['floating_network'] = str(ext_net)
131         else:
132             task_args['floating_network'] = ''
133
134         net_id = self.priv_net_id
135         if net_id:
136             task_args['netid'] = str(net_id)
137         else:
138             task_args['netid'] = ''
139
140         return task_args
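        # Illustrative sketch (hypothetical values, not part of the original
        # module): for test_file_name 'authenticate', the dict handed to rally
        # as --task-args would look roughly like
        #   {'service_list': ['authenticate'], 'image_name': 'Cirros-0.4.0',
        #    'flavor_name': 'm1.tiny', 'users_amount': 2, 'tenants_amount': 3,
        #    'iterations': 10, 'concurrency': 4, 'smoke': True,
        #    'floating_network': 'ext-net', 'netid': '<uuid>', ...}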
141
142     def _prepare_test_list(self, test_name):
143         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
144         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
145                                           test_yaml_file_name)
146
147         if not os.path.exists(scenario_file_name):
148             scenario_file_name = os.path.join(self.scenario_dir,
149                                               test_yaml_file_name)
150
151             if not os.path.exists(scenario_file_name):
152                 raise Exception("The scenario '%s' does not exist."
153                                 % scenario_file_name)
154
155         LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
156         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
157
158         if not os.path.exists(self.TEMP_DIR):
159             os.makedirs(self.TEMP_DIR)
160
161         self.apply_blacklist(scenario_file_name, test_file_name)
162         return test_file_name
163
164     @staticmethod
165     def get_task_id(cmd_raw):
166         """
167         Get the task id from the raw output of a rally command.
168 
169         :param cmd_raw: raw output of the 'rally task start' command
170         :return: the task id as a string, or None if it cannot be found
171         """
172         taskid_re = re.compile('^Task +(.*): started$')
173         for line in cmd_raw.splitlines(True):
174             line = line.strip()
175             match = taskid_re.match(line)
176             if match:
177                 return match.group(1)
178         return None
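        # Illustrative note (not in the original source): assuming rally's
        # usual console output, a matching line looks like
        #   Task 2fba6bdc-98d0-4bd5-a5b1-6cf6fbb09f27: started
        # (hypothetical UUID), in which case group(1) is the task id.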
179
180     @staticmethod
181     def task_succeed(json_raw):
182         """
183         Parse the JSON results of a rally task and check for success.
184 
185         :param json_raw: raw JSON output of 'rally task results'
186         :return: True if no scenario reported an error, False otherwise
187         """
188         rally_report = json.loads(json_raw)
189         for report in rally_report:
190             if report is None or report.get('result') is None:
191                 return False
192
193             for result in report.get('result'):
194                 if result is None or len(result.get('error')) > 0:
195                     return False
196
197         return True
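        # Illustrative sketch of the JSON shape this check assumes (values are
        # hypothetical): 'rally task results' yields a list of reports such as
        #   [{"result": [{"error": [], "duration": 1.2, ...}, ...], ...}]
        # and the task is considered successful only if every "error" list in
        # every report is empty.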
198
199     @staticmethod
200     def live_migration_supported():
201         """Determine if live migration is supported."""
202         config = iniparse.ConfigParser()
203         if (config.read(RallyBase.TEMPEST_CONF_FILE) and
204                 config.has_section('compute-feature-enabled') and
205                 config.has_option('compute-feature-enabled',
206                                   'live_migration')):
207             return config.getboolean('compute-feature-enabled',
208                                      'live_migration')
209
210         return False
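        # Illustrative note: the option read above would typically appear in
        # tempest.conf as (hypothetical excerpt)
        #   [compute-feature-enabled]
        #   live_migration = True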
211
212     @staticmethod
213     def get_cmd_output(proc):
214         """Get command stdout."""
215         result = ""
216         while proc.poll() is None:
217             line = proc.stdout.readline()
218             result += line
219         return result
220
221     @staticmethod
222     def excl_scenario():
223         """Exclude scenario."""
224         black_tests = []
225         try:
226             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
227                 black_list_yaml = yaml.safe_load(black_list_file)
228
229             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
230             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
231             if (bool(installer_type) and bool(deploy_scenario) and
232                     'scenario' in black_list_yaml.keys()):
233                 for item in black_list_yaml['scenario']:
234                     scenarios = item['scenarios']
235                     installers = item['installers']
236                     in_it = RallyBase.in_iterable_re
237                     if (in_it(deploy_scenario, scenarios) and
238                             in_it(installer_type, installers)):
239                         tests = item['tests']
240                         black_tests.extend(tests)
241         except Exception:  # pylint: disable=broad-except
242             LOGGER.debug("Scenario exclusion not applied.")
243
244         return black_tests
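        # Illustrative sketch (hypothetical values): the 'scenario' section of
        # blacklist.txt consumed above is expected to look roughly like
        #   scenario:
        #     - scenarios:
        #         - os-odl.*
        #       installers:
        #         - apex
        #       tests:
        #         - NeutronNetworks.create_and_delete_routers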
245
246     @staticmethod
247     def in_iterable_re(needle, haystack):
248         """
249         Check if given needle is in the iterable haystack, using regex.
250
251         :param needle: string to be matched
252         :param haystack: iterable of strings (optionally regex patterns)
253         :return: True if needle is equal to any of the elements in haystack,
254                  or if a nonempty regex pattern in haystack is found in needle.
255         """
256         # match without regex
257         if needle in haystack:
258             return True
259
260         # match if a regex pattern is set and found in the needle
261         for pattern in haystack:
262             if pattern and re.search(pattern, needle) is not None:
263                 return True
264 
265         return False
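        # Illustrative usage (hypothetical values):
        #   RallyBase.in_iterable_re('os-odl-nofeature-ha', ['os-odl.*'])
        #   -> True
        #   RallyBase.in_iterable_re('os-nosdn-nofeature-ha', ['os-odl.*'])
        #   -> False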
266
267     @staticmethod
268     def excl_func():
269         """Exclude functionalities."""
270         black_tests = []
271         func_list = []
272
273         try:
274             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
275                 black_list_yaml = yaml.safe_load(black_list_file)
276
277             if not RallyBase.live_migration_supported():
278                 func_list.append("no_live_migration")
279
280             if 'functionality' in black_list_yaml.keys():
281                 for item in black_list_yaml['functionality']:
282                     functions = item['functions']
283                     for func in func_list:
284                         if func in functions:
285                             tests = item['tests']
286                             black_tests.extend(tests)
287         except Exception:  # pylint: disable=broad-except
288             LOGGER.debug("Functionality exclusion not applied.")
289
290         return black_tests
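        # Illustrative sketch (hypothetical test name): the 'functionality'
        # section of blacklist.txt parsed above is expected to look like
        #   functionality:
        #     - functions:
        #         - no_live_migration
        #       tests:
        #         - NovaServers.boot_and_live_migrate_server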
291
292     @staticmethod
293     def apply_blacklist(case_file_name, result_file_name):
294         """Apply blacklist."""
295         LOGGER.debug("Applying blacklist...")
296         cases_file = open(case_file_name, 'r')
297         result_file = open(result_file_name, 'w')
298
299         black_tests = list(set(RallyBase.excl_func() +
300                                RallyBase.excl_scenario()))
301
302         if black_tests:
303             LOGGER.debug("Blacklisted tests: " + str(black_tests))
304
305         include = True
306         for cases_line in cases_file:
307             if include:
308                 for black_tests_line in black_tests:
309                     if re.search(black_tests_line,
310                                  cases_line.strip().rstrip(':')):
311                         include = False
312                         break
313                 else:
314                     result_file.write(str(cases_line))
315             else:
316                 if cases_line.isspace():
317                     include = True
318
319         cases_file.close()
320         result_file.close()
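        # Illustrative note: a blacklisted entry suppresses the whole YAML
        # block it starts, up to the next blank line. For example (hypothetical
        # scenario name), if 'NovaServers.boot_and_live_migrate_server' is
        # blacklisted, that scenario block and its arguments are skipped and
        # copying resumes after the following blank line.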
321
322     @staticmethod
323     def file_is_empty(file_name):
324         """Determine is a file is empty."""
325         try:
326             if os.stat(file_name).st_size > 0:
327                 return False
328         except Exception:  # pylint: disable=broad-except
329             pass
330
331         return True
332
333     def _run_task(self, test_name):
334         """Run a task."""
335         LOGGER.info('Starting test scenario "%s" ...', test_name)
336
337         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
338         if not os.path.exists(task_file):
339             LOGGER.error("Task file '%s' does not exist.", task_file)
340             raise Exception("Task file '%s' does not exist." % task_file)
341
342         file_name = self._prepare_test_list(test_name)
343         if self.file_is_empty(file_name):
344             LOGGER.info('No tests for scenario "%s"', test_name)
345             return
346
347         cmd_line = ("rally task start --abort-on-sla-failure "
348                     "--task {0} "
349                     "--task-args \"{1}\""
350                     .format(task_file, self._build_task_args(test_name)))
351         LOGGER.debug('running command line: %s', cmd_line)
352
353         proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
354                                 stderr=subprocess.STDOUT, shell=True)
355         output = self._get_output(proc, test_name)
356         task_id = self.get_task_id(output)
357         LOGGER.debug('task_id : %s', task_id)
358
359         if task_id is None:
360             LOGGER.error('Failed to retrieve task_id, validating task...')
361             cmd_line = ("rally task validate "
362                         "--task {0} "
363                         "--task-args \"{1}\""
364                         .format(task_file, self._build_task_args(test_name)))
365             LOGGER.debug('running command line: %s', cmd_line)
366             proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
367                                     stderr=subprocess.STDOUT, shell=True)
368             output = self.get_cmd_output(proc)
369             LOGGER.error("Task validation result:\n%s", output)
370             return
371
372         # check for result directory and create it otherwise
373         if not os.path.exists(self.RESULTS_DIR):
374             LOGGER.debug('%s does not exist, we create it.',
375                          self.RESULTS_DIR)
376             os.makedirs(self.RESULTS_DIR)
377
378         # write html report file
379         report_html_name = 'opnfv-{}.html'.format(test_name)
380         report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
381         cmd_line = "rally task report {} --out {}".format(task_id,
382                                                           report_html_dir)
383
384         LOGGER.debug('running command line: %s', cmd_line)
385         os.popen(cmd_line)
386
387         # get and save rally operation JSON result
388         cmd_line = "rally task results %s" % task_id
389         LOGGER.debug('running command line: %s', cmd_line)
390         cmd = os.popen(cmd_line)
391         json_results = cmd.read()
392         report_json_name = 'opnfv-{}.json'.format(test_name)
393         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
394         with open(report_json_dir, 'w') as r_file:
395             LOGGER.debug('saving json file')
396             r_file.write(json_results)
397
398         # parse JSON operation result
399         if self.task_succeed(json_results):
400             LOGGER.info('Test scenario: "%s" OK.\n', test_name)
401         else:
402             LOGGER.info('Test scenario: "%s" Failed.\n', test_name)
403
404     def _get_output(self, proc, test_name):
405         result = ""
406         nb_tests = 0
407         overall_duration = 0.0
408         success = 0.0
409         nb_totals = 0
410
411         while proc.poll() is None:
412             line = proc.stdout.readline()
413             if ("Load duration" in line or
414                     "started" in line or
415                     "finished" in line or
416                     " Preparing" in line or
417                     "+-" in line or
418                     "|" in line):
419                 result += line
420             elif "test scenario" in line:
421                 result += "\n" + line
422             elif "Full duration" in line:
423                 result += line + "\n\n"
424
425             # parse output for summary report
426             if ("| " in line and
427                     "| action" not in line and
428                     "| Starting" not in line and
429                     "| Completed" not in line and
430                     "| ITER" not in line and
431                     "|   " not in line and
432                     "| total" not in line):
433                 nb_tests += 1
434             elif "| total" in line:
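                # Illustrative note (assuming rally's usual summary table
                # layout): the totals row looks roughly like
                #   | total | 0.5 | 0.6 | 0.7 | 0.8 | 0.9 | 0.6 | 100.0% | 10 |
                # so field 8 of split('|') holds the success percentage.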
435                 percentage = ((line.split('|')[8]).strip(' ')).strip('%')
436                 try:
437                     success += float(percentage)
438                 except ValueError:
439                     LOGGER.info('Percentage error: %s, %s',
440                                 percentage, line)
441                 nb_totals += 1
442             elif "Full duration" in line:
443                 duration = line.split(': ')[1]
444                 try:
445                     overall_duration += float(duration)
446                 except ValueError:
447                     LOGGER.info('Duration error: %s, %s', duration, line)
448
449         overall_duration = "{:10.2f}".format(overall_duration)
450         if nb_totals == 0:
451             success_avg = 0
452         else:
453             success_avg = "{:0.2f}".format(success / nb_totals)
454
455         scenario_summary = {'test_name': test_name,
456                             'overall_duration': overall_duration,
457                             'nb_tests': nb_tests,
458                             'success': success_avg}
459         self.summary.append(scenario_summary)
460
461         LOGGER.debug("\n%s", result)
462
463         return result
464
465     def _prepare_env(self):
466         LOGGER.debug('Validating the test name...')
467         if self.test_name not in self.TESTS:
468             raise Exception("Test name '%s' is invalid" % self.test_name)
469
470         image_name = self.GLANCE_IMAGE_NAME + self.guid
471         network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
472         subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
473         router_name = self.RALLY_ROUTER_NAME + self.guid
474         self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
475
476         LOGGER.debug('Getting or creating image...')
477         image_creator = deploy_utils.create_image(
478             self.os_creds, ImageSettings(
479                 name=image_name,
480                 image_file=self.GLANCE_IMAGE_PATH,
481                 img_format=self.GLANCE_IMAGE_FORMAT,
482                 image_user=self.GLANCE_IMAGE_USERNAME,
483                 public=True,
484                 extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
485         if image_creator is None:
486             raise Exception("Failed to get or create image '%s'" %
487                             image_name)
488         self.creators.append(image_creator)
489
490         LOGGER.debug("Creating network '%s'...", network_name)
491         network_creator = deploy_utils.create_network(
492             self.os_creds, NetworkSettings(
493                 name=network_name,
494                 shared=True,
495                 subnet_settings=[SubnetSettings(
496                     name=subnet_name,
497                     cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
498                 ]))
499         if network_creator is None:
500             raise Exception("Failed to create private network")
501         self.priv_net_id = network_creator.get_network().id
502         self.creators.append(network_creator)
503
504         LOGGER.debug("Creating router '%s'...", router_name)
505         router_creator = deploy_utils.create_router(
506             self.os_creds, RouterSettings(
507                 name=router_name,
508                 external_gateway=self.ext_net_name,
509                 internal_subnets=[subnet_name]))
510         if router_creator is None:
511             raise Exception("Failed to create router")
512         self.creators.append(router_creator)
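        # Note: the creators appended above are released by _clean_up() in
        # reverse order, so the router is deleted before the network it is
        # attached to, and the image is removed last.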
513
514     def _run_tests(self):
515         if self.test_name == 'all':
516             for test in self.TESTS:
517                 if test in ('all', 'vm'):
518                     continue
519                 self._run_task(test)
520         else:
521             self._run_task(self.test_name)
522
523     def _generate_report(self):
524         report = (
525             "\n"
526             "                                                              "
527             "\n"
528             "                     Rally Summary Report\n"
529             "\n"
530             "+===================+============+===============+===========+"
531             "\n"
532             "| Module            | Duration   | nb. Test Run  | Success   |"
533             "\n"
534             "+===================+============+===============+===========+"
535             "\n")
536         payload = []
537
538         # for each scenario we draw a row for the table
539         total_duration = 0.0
540         total_nb_tests = 0
541         total_success = 0.0
542         for item in self.summary:
543             name = "{0:<17}".format(item['test_name'])
544             duration = float(item['overall_duration'])
545             total_duration += duration
546             duration = time.strftime("%M:%S", time.gmtime(duration))
547             duration = "{0:<10}".format(duration)
548             nb_tests = "{0:<13}".format(item['nb_tests'])
549             total_nb_tests += int(item['nb_tests'])
550             success = "{0:<10}".format(str(item['success']) + '%')
551             total_success += float(item['success'])
552             report += ("" +
553                        "| " + name + " | " + duration + " | " +
554                        nb_tests + " | " + success + "|\n" +
555                        "+-------------------+------------"
556                        "+---------------+-----------+\n")
557             payload.append({'module': name,
558                             'details': {'duration': item['overall_duration'],
559                                         'nb tests': item['nb_tests'],
560                                         'success': item['success']}})
561
562         total_duration_str = time.strftime("%H:%M:%S",
563                                            time.gmtime(total_duration))
564         total_duration_str2 = "{0:<10}".format(total_duration_str)
565         total_nb_tests_str = "{0:<13}".format(total_nb_tests)
566
567         try:
568             self.result = total_success / len(self.summary)
569         except ZeroDivisionError:
570             self.result = 100
571
572         success_rate = "{:0.2f}".format(self.result)
573         success_rate_str = "{0:<10}".format(str(success_rate) + '%')
574         report += ("+===================+============"
575                    "+===============+===========+")
576         report += "\n"
577         report += ("| TOTAL:            | " + total_duration_str2 + " | " +
578                    total_nb_tests_str + " | " + success_rate_str + "|\n")
579         report += ("+===================+============"
580                    "+===============+===========+")
581         report += "\n"
582
583         LOGGER.info("\n%s", report)
584         payload.append({'summary': {'duration': total_duration,
585                                     'nb tests': total_nb_tests,
586                                     'nb success': success_rate}})
587
588         self.details = payload
589
590         LOGGER.info("Rally '%s' success_rate is %s%%",
591                     self.case_name, success_rate)
592
593     def _clean_up(self):
594         for creator in reversed(self.creators):
595             try:
596                 creator.clean()
597             except Exception as exc:  # pylint: disable=broad-except
598                 LOGGER.error('Unexpected error cleaning - %s', exc)
599
600     @energy.enable_recording
601     def run(self, **kwargs):
602         """Run testcase."""
603         self.start_time = time.time()
604         try:
605             self._prepare_env()
606             self._run_tests()
607             self._generate_report()
608             res = testcase.TestCase.EX_OK
609         except Exception as exc:   # pylint: disable=broad-except
610             LOGGER.error('Error with run: %s', exc)
611             res = testcase.TestCase.EX_RUN_ERROR
612         finally:
613             self._clean_up()
614
615         self.stop_time = time.time()
616         return res
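        # Illustrative usage (hypothetical, not part of the original module):
        #   case = RallySanity(os_creds=creds)
        #   if case.run() == testcase.TestCase.EX_OK:
        #       print(case.result, case.details)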
617
618
619 class RallySanity(RallyBase):
620     """Rally sanity testcase implementation."""
621
622     def __init__(self, **kwargs):
623         """Initialize RallySanity object."""
624         if "case_name" not in kwargs:
625             kwargs["case_name"] = "rally_sanity"
626         super(RallySanity, self).__init__(**kwargs)
627         self.mode = 'sanity'
628         self.test_name = 'all'
629         self.smoke = True
630         self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
631
632
633 class RallyFull(RallyBase):
634     """Rally full testcase implementation."""
635
636     def __init__(self, **kwargs):
637         """Initialize RallyFull object."""
638         if "case_name" not in kwargs:
639             kwargs["case_name"] = "rally_full"
640         super(RallyFull, self).__init__(**kwargs)
641         self.mode = 'full'
642         self.test_name = 'all'
643         self.smoke = False
644         self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')