Fix image name in rally scenarios
functest/opnfv_tests/openstack/rally/rally.py
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

"""Rally testcases implementation."""

from __future__ import division

import json
import logging
import os
import re
import subprocess
import time
import uuid

import iniparse
import pkg_resources
import yaml

from functest.core import testcase
from functest.energy import energy
from functest.opnfv_tests.openstack.snaps import snaps_utils
from functest.utils.constants import CONST

from snaps.openstack.create_image import ImageSettings
from snaps.openstack.create_network import NetworkSettings, SubnetSettings
from snaps.openstack.create_router import RouterSettings
from snaps.openstack.tests import openstack_tests
from snaps.openstack.utils import deploy_utils

LOGGER = logging.getLogger(__name__)


class RallyBase(testcase.TestCase):
    """Base class for Rally testcase implementations."""

    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_USERNAME = CONST.__getattribute__('openstack_image_username')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = "m1.tiny"

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        if 'os_creds' in kwargs:
            self.os_creds = kwargs['os_creds']
        else:
            creds_override = None
            if hasattr(CONST, 'snaps_os_creds_override'):
                creds_override = CONST.__getattribute__(
                    'snaps_os_creds_override')

            self.os_creds = openstack_tests.get_credentials(
                os_env_file=CONST.__getattribute__('openstack_creds'),
                overrides=creds_override)

        self.guid = ''
        if CONST.__getattribute__('rally_unique_names'):
            self.guid = '-' + str(uuid.uuid4())

        self.creators = []
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.image_name = None
        self.ext_net_name = None
        self.priv_net_id = None
        self.smoke = None
        self.test_name = None
        self.start_time = None
        self.result = None
        self.details = None
    def _build_task_args(self, test_file_name):
        """Build the rally task arguments for the given test."""
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.image_name
        task_args['flavor_name'] = self.FLAVOR_NAME
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        ext_net = self.ext_net_name
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.priv_net_id
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args
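
    # Illustrative only: a sketch of the dict _build_task_args() returns for
    # test_file_name='glance'; the values below are hypothetical and actually
    # come from CONST and the instance state:
    #   {'service_list': ['glance'], 'image_name': 'functest-img-GUID',
    #    'flavor_name': 'm1.tiny', 'glance_image_location': '/path/to/image',
    #    'glance_image_format': 'qcow2', 'tmpl_dir': 'scenario/templates',
    #    'sup_dir': 'scenario/support', 'users_amount': 2,
    #    'tenants_amount': 3, 'use_existing_users': False, 'iterations': 10,
    #    'concurrency': 4, 'smoke': True, 'floating_network': 'ext-net',
    #    'netid': 'NET-UUID'}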

    def _prepare_test_list(self, test_name):
        """Build the test file for the scenario, applying the blacklist."""
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self.apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get the task id from the rally command output.

        :param cmd_raw: raw output of the rally command
        :return: task_id as string
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None
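
    # Example (illustrative): get_task_id() extracts the identifier from the
    # "rally task start" banner, e.g. the line
    #   Task 0a1b2c3d-4e5f-6789-abcd-ef0123456789: started
    # yields '0a1b2c3d-4e5f-6789-abcd-ef0123456789'.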

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse the rally JSON results and check whether the task succeeded.

        :param json_raw: raw JSON output of 'rally task results'
        :return: True if no scenario reported an error, False otherwise
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True
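
    # Illustrative shape of the "rally task results" JSON that task_succeed()
    # inspects (heavily simplified; real reports carry many more fields):
    #   [{"result": [{"error": [], "duration": 1.23}]}]
    # A missing 'result' key or any non-empty 'error' list means failure.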

    @staticmethod
    def live_migration_supported():
        """Determine if live migration is supported."""
        config = iniparse.ConfigParser()
        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
                config.has_section('compute-feature-enabled') and
                config.has_option('compute-feature-enabled',
                                  'live_migration')):
            return config.getboolean('compute-feature-enabled',
                                     'live_migration')

        return False
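
    # The tempest.conf fragment checked above (section and option names are
    # taken from the code; the value is illustrative):
    #   [compute-feature-enabled]
    #   live_migration = True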

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        while proc.poll() is None:
            line = proc.stdout.readline()
            result += line
        return result

    @staticmethod
    def excl_scenario():
        """Return the tests blacklisted for the current deploy scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests
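
    # Expected layout of the 'scenario' section of blacklist.txt, as read by
    # excl_scenario() (keys come from the code above; values are
    # illustrative):
    #   scenario:
    #     -
    #       scenarios:
    #         - '^os-nosdn-.*-ha$'
    #       installers:
    #         - '.*'
    #       tests:
    #         - 'NovaServers.boot_and_live_migrate_server'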

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if the given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False
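
    # Illustrative behaviour of in_iterable_re(); regex patterns in the
    # haystack are searched inside the needle:
    #   in_iterable_re('os-odl-nofeature-ha', ['os-odl.*'])        -> True
    #   in_iterable_re('os-nosdn-nofeature-ha', ['joid', 'apex'])  -> False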

    @staticmethod
    def excl_func():
        """Return the tests blacklisted for unsupported functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not RallyBase.live_migration_supported():
                func_list.append("no_live_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests
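
    # Expected layout of the 'functionality' section of blacklist.txt, as
    # read by excl_func() (keys and the 'no_live_migration' flag come from
    # the code above; the test name is illustrative):
    #   functionality:
    #     -
    #       functions:
    #         - no_live_migration
    #       tests:
    #         - 'NovaServers.boot_and_live_migrate_server'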

    @staticmethod
    def apply_blacklist(case_file_name, result_file_name):
        """Copy the scenario file, skipping any blacklisted test blocks."""
        LOGGER.debug("Applying blacklist...")
        cases_file = open(case_file_name, 'r')
        result_file = open(result_file_name, 'w')

        black_tests = list(set(RallyBase.excl_func() +
                               RallyBase.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
        for cases_line in cases_file:
            if include:
                for black_tests_line in black_tests:
                    if re.search(black_tests_line,
                                 cases_line.strip().rstrip(':')):
                        include = False
                        break
                else:
                    result_file.write(str(cases_line))
            else:
                if cases_line.isspace():
                    include = True

        cases_file.close()
        result_file.close()
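
    # Sketch of the effect of apply_blacklist(): when a line of the scenario
    # file matches a blacklisted pattern (trailing ':' ignored), that line and
    # the rest of its block, up to and including the next blank line, are
    # skipped; all other lines are copied verbatim to the result file.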

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except Exception:  # pylint: disable=broad-except
            pass

        return True

    def _run_task(self, test_name):
        """Run a task."""
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        LOGGER.debug('running command line: %s', cmd_line)

        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            LOGGER.debug('running command line: %s', cmd_line)
            proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:" + "\n" + output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        LOGGER.debug('running command line: %s', cmd_line)
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        LOGGER.debug('running command line: %s', cmd_line)
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "{}" OK.'.format(test_name) + "\n")
        else:
            LOGGER.info('Test scenario: "{}" Failed.'.format(test_name) + "\n")

    def _get_output(self, proc, test_name):
        """Consume the rally output and build the scenario summary."""
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n" + result)

        return result
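
    # Illustrative: a "| total |" row in the rally output table looks roughly
    # like
    #   | total | 0.8 | 0.9 | 1.0 | 1.1 | 1.2 | 0.95 | 100.0% | 10 |
    # which is why the success percentage is read from field 8 of
    # line.split('|').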

    def _prepare_env(self):
        """Create the image, network and router required by the scenarios."""
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        network_name = self.RALLY_PRIVATE_NET_NAME + self.guid
        subnet_name = self.RALLY_PRIVATE_SUBNET_NAME + self.guid
        router_name = self.RALLY_ROUTER_NAME + self.guid
        self.image_name = self.GLANCE_IMAGE_NAME + self.guid
        self.ext_net_name = snaps_utils.get_ext_net_name(self.os_creds)

        LOGGER.debug("Creating image '%s'...", self.image_name)
        image_creator = deploy_utils.create_image(
            self.os_creds, ImageSettings(
                name=self.image_name,
                image_file=self.GLANCE_IMAGE_PATH,
                img_format=self.GLANCE_IMAGE_FORMAT,
                image_user=self.GLANCE_IMAGE_USERNAME,
                public=True,
                extra_properties=self.GLANCE_IMAGE_EXTRA_PROPERTIES))
        if image_creator is None:
            raise Exception("Failed to create image")
        self.creators.append(image_creator)

        LOGGER.debug("Creating network '%s'...", network_name)
        network_creator = deploy_utils.create_network(
            self.os_creds, NetworkSettings(
                name=network_name,
                shared=True,
                subnet_settings=[SubnetSettings(
                    name=subnet_name,
                    cidr=self.RALLY_PRIVATE_SUBNET_CIDR)
                ]))
        if network_creator is None:
            raise Exception("Failed to create private network")
        self.priv_net_id = network_creator.get_network().id
        self.creators.append(network_creator)

        LOGGER.debug("Creating router '%s'...", router_name)
        router_creator = deploy_utils.create_router(
            self.os_creds, RouterSettings(
                name=router_name,
                external_gateway=self.ext_net_name,
                internal_subnets=[subnet_name]))
        if router_creator is None:
            raise Exception("Failed to create router")
        self.creators.append(router_creator)

    def _run_tests(self):
        """Run the rally tasks for the selected test(s)."""
        if self.test_name == 'all':
            for test in self.TESTS:
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        """Build the summary report and the test case details."""
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

    def _clean_up(self):
        """Remove the OpenStack resources created for the tests."""
        for creator in reversed(self.creators):
            try:
                creator.clean()
            except Exception as e:
                LOGGER.error('Unexpected error cleaning - %s', e)

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR
        finally:
            self._clean_up()

        self.stop_time = time.time()
        return res


class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_sanity"
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')


class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_full"
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
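

# Illustrative usage only (assumed manual entry point; in Functest these
# classes are normally instantiated and run by the test runner):
#
#   from functest.opnfv_tests.openstack.rally.rally import RallySanity
#
#   rally_case = RallySanity()
#   if rally_case.run() == RallySanity.EX_OK:
#       print(rally_case.details)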