Fix the yamllint errors in functest/ci
[functest.git] / functest / opnfv_tests / openstack / rally / rally.py
1 #!/usr/bin/env python
2 #
3 # Copyright (c) 2015 All rights reserved
4 # This program and the accompanying materials
5 # are made available under the terms of the Apache License, Version 2.0
6 # which accompanies this distribution, and is available at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10
11 """Rally testcases implementation."""
12
13 from __future__ import division
14
15 import json
16 import logging
17 import os
18 import re
19 import subprocess
20 import time
21 import uuid
22
23 import pkg_resources
24 import yaml
25
26 from functest.core import testcase
27 from functest.energy import energy
28 from functest.opnfv_tests.openstack.snaps import snaps_utils
29 from functest.opnfv_tests.openstack.tempest import conf_utils
30 from functest.utils.constants import CONST
31
32 from snaps.config.flavor import FlavorConfig
33 from snaps.config.image import ImageConfig
34 from snaps.config.network import NetworkConfig, SubnetConfig
35 from snaps.config.router import RouterConfig
36
37 from snaps.openstack.create_flavor import OpenStackFlavor
38 from snaps.openstack.tests import openstack_tests
39 from snaps.openstack.utils import deploy_utils
40
41 LOGGER = logging.getLogger(__name__)
42
43
class RallyBase(testcase.TestCase):
    """Base class for Rally testcases implementation."""

    # Scenario names accepted as test_name; 'all' expands to every other
    # entry except 'vm' (see _run_tests).
    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    # Image under test, resolved from the functest configuration (CONST).
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    # Optional settings: only present in some installer configurations.
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = CONST.__getattribute__('rally_flavor_name')
    FLAVOR_ALT_NAME = CONST.__getattribute__('rally_flavor_alt_name')
    FLAVOR_EXTRA_SPECS = None
    if hasattr(CONST, 'flavor_extra_specs'):
        FLAVOR_EXTRA_SPECS = CONST.__getattribute__('flavor_extra_specs')

    # Directories shipped inside the installed functest package.
    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    # Defaults injected into the rally task arguments (_build_task_args).
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    # Work dir where blacklist-filtered scenario files are written.
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        # Credentials may be injected by the caller; otherwise they are
        # read from the openstack_creds file, with optional snaps overrides.
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        # Unique suffix appended to every resource name created by this run.
        self.guid = '-' + str(uuid.uuid4())

        # Resource creators are appended here and cleaned in reverse order.
        self.creators = []
        self.mode = ''
        # Per-scenario stats filled by _get_output, consumed by
        # _generate_report.
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.flavor_name = None
        self.flavor_alt_name = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
        # Number of active computes; gates the migration test exclusion.
        self.compute_cnt = 0
120     def _build_task_args(self, test_file_name):
121         task_args = {'service_list': [test_file_name]}
122         task_args['image_name'] = self.image_name
123         task_args['flavor_name'] = self.flavor_name
124         task_args['flavor_alt_name'] = self.flavor_alt_name
125         task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
126         task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
127         task_args['tmpl_dir'] = self.TEMPLATE_DIR
128         task_args['sup_dir'] = self.SUPPORT_DIR
129         task_args['users_amount'] = self.USERS_AMOUNT
130         task_args['tenants_amount'] = self.TENANTS_AMOUNT
131         task_args['use_existing_users'] = False
132         task_args['iterations'] = self.ITERATIONS_AMOUNT
133         task_args['concurrency'] = self.CONCURRENCY
134         task_args['smoke'] = self.smoke
135
136         ext_net = self.ext_net_name
137         if ext_net:
138             task_args['floating_network'] = str(ext_net)
139         else:
140             task_args['floating_network'] = ''
141
142         net_id = self.priv_net_id
143         if net_id:
144             task_args['netid'] = str(net_id)
145         else:
146             task_args['netid'] = ''
147
148         return task_args
149
150     def _prepare_test_list(self, test_name):
151         test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
152         scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
153                                           test_yaml_file_name)
154
155         if not os.path.exists(scenario_file_name):
156             scenario_file_name = os.path.join(self.scenario_dir,
157                                               test_yaml_file_name)
158
159             if not os.path.exists(scenario_file_name):
160                 raise Exception("The scenario '%s' does not exist."
161                                 % scenario_file_name)
162
163         LOGGER.debug('Scenario fetched from : %s', scenario_file_name)
164         test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)
165
166         if not os.path.exists(self.TEMP_DIR):
167             os.makedirs(self.TEMP_DIR)
168
169         self._apply_blacklist(scenario_file_name, test_file_name)
170         return test_file_name
171
172     @staticmethod
173     def get_task_id(cmd_raw):
174         """
175         Get task id from command rally result.
176
177         :param cmd_raw:
178         :return: task_id as string
179         """
180         taskid_re = re.compile('^Task +(.*): started$')
181         for line in cmd_raw.splitlines(True):
182             line = line.strip()
183             match = taskid_re.match(line)
184             if match:
185                 return match.group(1)
186         return None
187
188     @staticmethod
189     def task_succeed(json_raw):
190         """
191         Parse JSON from rally JSON results.
192
193         :param json_raw:
194         :return: Bool
195         """
196         rally_report = json.loads(json_raw)
197         for report in rally_report:
198             if report is None or report.get('result') is None:
199                 return False
200
201             for result in report.get('result'):
202                 if result is None or len(result.get('error')) > 0:
203                     return False
204
205         return True
206
207     def _migration_supported(self):
208         """Determine if migration is supported."""
209         if self.compute_cnt > 1:
210             return True
211
212         return False
213
214     @staticmethod
215     def get_cmd_output(proc):
216         """Get command stdout."""
217         result = ""
218         while proc.poll() is None:
219             line = proc.stdout.readline()
220             result += line
221         return result
222
223     @staticmethod
224     def excl_scenario():
225         """Exclude scenario."""
226         black_tests = []
227         try:
228             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
229                 black_list_yaml = yaml.safe_load(black_list_file)
230
231             installer_type = CONST.__getattribute__('INSTALLER_TYPE')
232             deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
233             if (bool(installer_type) and bool(deploy_scenario) and
234                     'scenario' in black_list_yaml.keys()):
235                 for item in black_list_yaml['scenario']:
236                     scenarios = item['scenarios']
237                     installers = item['installers']
238                     in_it = RallyBase.in_iterable_re
239                     if (in_it(deploy_scenario, scenarios) and
240                             in_it(installer_type, installers)):
241                         tests = item['tests']
242                         black_tests.extend(tests)
243         except Exception:
244             LOGGER.debug("Scenario exclusion not applied.")
245
246         return black_tests
247
248     @staticmethod
249     def in_iterable_re(needle, haystack):
250         """
251         Check if given needle is in the iterable haystack, using regex.
252
253         :param needle: string to be matched
254         :param haystack: iterable of strings (optionally regex patterns)
255         :return: True if needle is eqial to any of the elements in haystack,
256                  or if a nonempty regex pattern in haystack is found in needle.
257         """
258         # match without regex
259         if needle in haystack:
260             return True
261
262         for pattern in haystack:
263             # match if regex pattern is set and found in the needle
264             if pattern and re.search(pattern, needle) is not None:
265                 return True
266         else:
267             return False
268
269     def excl_func(self):
270         """Exclude functionalities."""
271         black_tests = []
272         func_list = []
273
274         try:
275             with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
276                 black_list_yaml = yaml.safe_load(black_list_file)
277
278             if not self._migration_supported():
279                 func_list.append("no_migration")
280
281             if 'functionality' in black_list_yaml.keys():
282                 for item in black_list_yaml['functionality']:
283                     functions = item['functions']
284                     for func in func_list:
285                         if func in functions:
286                             tests = item['tests']
287                             black_tests.extend(tests)
288         except Exception:  # pylint: disable=broad-except
289             LOGGER.debug("Functionality exclusion not applied.")
290
291         return black_tests
292
293     def _apply_blacklist(self, case_file_name, result_file_name):
294         """Apply blacklist."""
295         LOGGER.debug("Applying blacklist...")
296         cases_file = open(case_file_name, 'r')
297         result_file = open(result_file_name, 'w')
298
299         black_tests = list(set(self.excl_func() +
300                                self.excl_scenario()))
301
302         if black_tests:
303             LOGGER.debug("Blacklisted tests: " + str(black_tests))
304
305         include = True
306         for cases_line in cases_file:
307             if include:
308                 for black_tests_line in black_tests:
309                     if re.search(black_tests_line,
310                                  cases_line.strip().rstrip(':')):
311                         include = False
312                         break
313                 else:
314                     result_file.write(str(cases_line))
315             else:
316                 if cases_line.isspace():
317                     include = True
318
319         cases_file.close()
320         result_file.close()
321
322     @staticmethod
323     def file_is_empty(file_name):
324         """Determine is a file is empty."""
325         try:
326             if os.stat(file_name).st_size > 0:
327                 return False
328         except Exception:  # pylint: disable=broad-except
329             pass
330
331         return True
332
333     def _run_task(self, test_name):
334         """Run a task."""
335         LOGGER.info('Starting test scenario "%s" ...', test_name)
336
337         task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
338         if not os.path.exists(task_file):
339             LOGGER.error("Task file '%s' does not exist.", task_file)
340             raise Exception("Task file '%s' does not exist.", task_file)
341
342         file_name = self._prepare_test_list(test_name)
343         if self.file_is_empty(file_name):
344             LOGGER.info('No tests for scenario "%s"', test_name)
345             return
346
347         cmd = (["rally", "task", "start", "--abort-on-sla-failure", "--task",
348                 task_file, "--task-args",
349                 str(self._build_task_args(test_name))])
350         LOGGER.debug('running command: %s', cmd)
351
352         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
353                                 stderr=subprocess.STDOUT)
354         output = self._get_output(proc, test_name)
355         task_id = self.get_task_id(output)
356         LOGGER.debug('task_id : %s', task_id)
357
358         if task_id is None:
359             LOGGER.error('Failed to retrieve task_id, validating task...')
360             cmd = (["rally", "task", "validate", "--task", task_file,
361                     "--task-args", str(self._build_task_args(test_name))])
362             LOGGER.debug('running command: %s', cmd)
363             proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
364                                     stderr=subprocess.STDOUT)
365             output = self.get_cmd_output(proc)
366             LOGGER.error("Task validation result:" + "\n" + output)
367             return
368
369         # check for result directory and create it otherwise
370         if not os.path.exists(self.RESULTS_DIR):
371             LOGGER.debug('%s does not exist, we create it.',
372                          self.RESULTS_DIR)
373             os.makedirs(self.RESULTS_DIR)
374
375         # write html report file
376         report_html_name = 'opnfv-{}.html'.format(test_name)
377         report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
378         cmd = (["rally", "task", "report", task_id, "--out", report_html_dir])
379
380         LOGGER.debug('running command: %s', cmd)
381         subprocess.Popen(cmd, stdout=subprocess.PIPE,
382                          stderr=subprocess.STDOUT)
383
384         # get and save rally operation JSON result
385         cmd = (["rally", "task", "results", task_id])
386         LOGGER.debug('running command: %s', cmd)
387         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
388                                 stderr=subprocess.STDOUT)
389         json_results = self.get_cmd_output(proc)
390         report_json_name = 'opnfv-{}.json'.format(test_name)
391         report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
392         with open(report_json_dir, 'w') as r_file:
393             LOGGER.debug('saving json file')
394             r_file.write(json_results)
395
396         # parse JSON operation result
397         if self.task_succeed(json_results):
398             LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
399         else:
400             LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")
401
    def _get_output(self, proc, test_name):
        """Stream rally stdout, keeping the human-readable report lines.

        Also parses the summary tables on the fly: counts test rows,
        accumulates the success percentage and the total duration, and
        appends a per-scenario summary dict to self.summary for
        _generate_report.

        :param proc: running rally subprocess with stdout=PIPE
        :param test_name: scenario name for the summary entry
        :return: the filtered output as a string
        """
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            # Keep only lines belonging to rally's report tables/headers.
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            # A table row that is none of the known non-test rows counts
            # as one executed test.
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                # Column 8 of the 'total' row holds the success percentage.
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            # Average of the per-table success percentages.
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n" + result)

        return result
462
    def _prepare_env(self):
        """Create the OpenStack resources the rally scenarios need.

        Creates a guid-suffixed image, shared network + subnet, router
        and two flavors, registering each creator in self.creators so
        _clean_up can tear them down in reverse order.

        :raises Exception: on invalid test name or any creation failure
        """
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        # Suffix every resource name with the run's guid to avoid clashes.
        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.flavor_name = self.FLAVOR_NAME + self.guid
        self.flavor_alt_name = self.FLAVOR_ALT_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)
        self.compute_cnt = snaps_utils.get_active_compute_cnt(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageConfig(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkConfig(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetConfig(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterConfig(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_name)
        flavor_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_name, ram=512, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_creator is None or flavor_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_creator)

        LOGGER.debug("Creating flavor '%s'...", self.flavor_alt_name)
        flavor_alt_creator = OpenStackFlavor(
            self.os_creds, FlavorConfig(
                name=self.flavor_alt_name, ram=1024, disk=1, vcpus=1,
                metadata=self.FLAVOR_EXTRA_SPECS))
        if flavor_alt_creator is None or flavor_alt_creator.create() is None:
            raise Exception("Failed to create flavor")
        self.creators.append(flavor_alt_creator)
531
532     def _run_tests(self):
533         if self.test_name == 'all':
534             for test in self.TESTS:
535                 if test == 'all' or test == 'vm':
536                     continue
537                 self._run_task(test)
538         else:
539             self._run_task(self.test_name)
540
    def _generate_report(self):
        """Build the ASCII summary table and the result payload.

        Consumes self.summary (filled by _get_output), logs the table,
        sets self.result to the average success rate and self.details to
        the payload list pushed to the test DB.
        """
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        # Average success over scenarios; an empty summary counts as 100%.
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)
610
611     def _clean_up(self):
612         for creator in reversed(self.creators):
613             try:
614                 creator.clean()
615             except Exception as e:
616                 LOGGER.error('Unexpected error cleaning - %s', e)
617
    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase.

        Creates the rally deployment and required resources, runs the
        scenario(s), generates the report and always cleans up, even on
        failure.

        :return: TestCase.EX_OK or TestCase.EX_RUN_ERROR
        """
        self.start_time = time.time()
        try:
            conf_utils.create_rally_deployment()
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            # Resources are removed whether the run succeeded or not.
            self._clean_up()

        self.stop_time = time.time()
        return res
636
637
class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        # Default the case name unless the caller supplied one.
        kwargs.setdefault("case_name", "rally_sanity")
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')
650
651
class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        # Default the case name unless the caller supplied one.
        kwargs.setdefault("case_name", "rally_full")
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')