Merge "Add UEFI support in functest"
[functest-xtesting.git] / functest / opnfv_tests / openstack / rally / rally.py
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

"""Rally testcases implementation."""

from __future__ import division

import json
import logging
import os
import re
import subprocess
import time

import iniparse
import pkg_resources
import yaml

from functest.core import testcase
from functest.energy import energy
from functest.utils.constants import CONST
import functest.utils.openstack_utils as os_utils

LOGGER = logging.getLogger(__name__)


class RallyBase(testcase.OSGCTestCase):
    """Base class for Rally testcase implementation."""

    TESTS = ['authenticate', 'glance', 'ceilometer', 'cinder', 'heat',
             'keystone', 'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    GLANCE_IMAGE_EXTRA_PROPERTIES = {}
    if hasattr(CONST, 'openstack_extra_properties'):
        GLANCE_IMAGE_EXTRA_PROPERTIES = CONST.__getattribute__(
            'openstack_extra_properties')
    FLAVOR_NAME = "m1.tiny"

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")

    CINDER_VOLUME_TYPE_NAME = "volume_test"
    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.nova_client = os_utils.get_nova_client()
        self.neutron_client = os_utils.get_neutron_client()
        self.cinder_client = os_utils.get_cinder_client()
        self.network_dict = {}
        self.volume_type = None
        self.smoke = None
        self.test_name = None
        self.image_exists = None
        self.image_id = None
        self.start_time = None
        self.result = None
        self.details = None

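    # The dictionary built below is passed to "rally task start" via
    # --task-args: task.yaml and the per-service scenario files are rendered
    # with these values (image, flavor, network, tenants/users, iterations
    # and concurrency) before the task is started.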
    def _build_task_args(self, test_file_name):
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.GLANCE_IMAGE_NAME
        task_args['flavor_name'] = self.FLAVOR_NAME
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

        ext_net = os_utils.get_external_net(self.neutron_client)
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.network_dict['net_id']
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args

    def _prepare_test_list(self, test_name):
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self.apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

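    # "rally task start" prints a line of the form "Task <id>: started";
    # that id is the only handle on the task available from stdout, so it is
    # scraped with a regex below and reused for the report commands.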
    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get task id from rally command output.

        :param cmd_raw: raw output of the rally command
        :return: task_id as string
        """
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

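    # Rally's JSON results are a list of scenario reports, each holding a
    # 'result' list with one entry per iteration; the task is considered
    # successful only if no iteration reports an error.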
    @staticmethod
    def task_succeed(json_raw):
        """
        Parse JSON from rally JSON results.

        :param json_raw: raw JSON results of the rally task
        :return: Bool
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

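    # Live migration support is read from the tempest.conf kept under the
    # results directory, when such a file is available; without it the
    # feature is conservatively reported as unsupported.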
    @staticmethod
    def live_migration_supported():
        """Determine if live migration is supported."""
        config = iniparse.ConfigParser()
        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
                config.has_section('compute-feature-enabled') and
                config.has_option('compute-feature-enabled',
                                  'live_migration')):
            return config.getboolean('compute-feature-enabled',
                                     'live_migration')

        return False

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        while proc.poll() is None:
            line = proc.stdout.readline()
            result += line
        return result

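    # blacklist.txt may carry a 'scenario' section mapping installer and
    # deploy scenario patterns to test names; tests matching the current
    # INSTALLER_TYPE/DEPLOY_SCENARIO pair are excluded from the run.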
    @staticmethod
    def excl_scenario():
        """Exclude scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

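    # Example: in_iterable_re('os-nosdn-nofeature-ha',
    # ['^os-.*-nofeature-.*$']) returns True because the regex pattern
    # matches, even though the exact string is not present in the iterable.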
    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False

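    # blacklist.txt may also carry a 'functionality' section; the only
    # functionality probed here is live migration, whose associated tests
    # are skipped when the deployment does not support it.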
    @staticmethod
    def excl_func():
        """Exclude functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not RallyBase.live_migration_supported():
                func_list.append("no_live_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

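    # The opnfv-*.yaml scenario files are expected to separate test blocks
    # with blank lines; once a test name matches a blacklist entry, every
    # line up to the next blank line is dropped from the output file.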
    @staticmethod
    def apply_blacklist(case_file_name, result_file_name):
        """Apply blacklist."""
        LOGGER.debug("Applying blacklist...")
        black_tests = list(set(RallyBase.excl_func() +
                               RallyBase.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
        with open(case_file_name, 'r') as cases_file, \
                open(result_file_name, 'w') as result_file:
            for cases_line in cases_file:
                if include:
                    for black_tests_line in black_tests:
                        if re.search(black_tests_line,
                                     cases_line.strip().rstrip(':')):
                            include = False
                            break
                    else:
                        result_file.write(str(cases_line))
                else:
                    if cases_line.isspace():
                        include = True

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except Exception:  # pylint: disable=broad-except
            pass

        return True

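    # A single rally run: render the blacklist-filtered scenario list, start
    # the task with "rally task start --abort-on-sla-failure", scrape the
    # task id from stdout, then dump the HTML and JSON reports under the
    # results directory. If no task id can be found, the task is re-checked
    # with "rally task validate" so the error shows up in the logs.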
    def _run_task(self, test_name):
        """Run a task."""
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        LOGGER.debug('running command line: %s', cmd_line)

        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            LOGGER.debug('running command line: %s', cmd_line)
            proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:" + "\n" + output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        LOGGER.debug('running command line: %s', cmd_line)
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        LOGGER.debug('running command line: %s', cmd_line)
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

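    # Rally prints its progress as ASCII tables. The parsing below keeps the
    # interesting lines for the log, counts the per-action rows to get the
    # number of tests, reads the success percentage (the 8th '|'-separated
    # field of the "| total |" row) and accumulates the "Full duration"
    # values for the summary report.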
    def _get_output(self, proc, test_name):
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

        while proc.poll() is None:
            line = proc.stdout.readline()
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n" + result)

        return result

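    # Environment preparation: make sure a volume type, the Glance image and
    # the shared rally network/subnet/router exist before any task starts.
    # The volume type and image created here are removed again in
    # _clean_up().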
    def _prepare_env(self):
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        volume_types = os_utils.list_volume_types(self.cinder_client,
                                                  private=False)
        if volume_types:
            LOGGER.debug("Using existing volume type(s)...")
        else:
            LOGGER.debug('Creating volume type...')
            self.volume_type = os_utils.create_volume_type(
                self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
            if self.volume_type is None:
                raise Exception("Failed to create volume type '%s'" %
                                self.CINDER_VOLUME_TYPE_NAME)
            LOGGER.debug("Volume type '%s' was created successfully.",
                         self.CINDER_VOLUME_TYPE_NAME)

        LOGGER.debug('Getting or creating image...')
        self.image_exists, self.image_id = os_utils.get_or_create_image(
            self.GLANCE_IMAGE_NAME,
            self.GLANCE_IMAGE_PATH,
            self.GLANCE_IMAGE_FORMAT,
            self.GLANCE_IMAGE_EXTRA_PROPERTIES)
        if self.image_id is None:
            raise Exception("Failed to get or create image '%s'" %
                            self.GLANCE_IMAGE_NAME)

        LOGGER.debug("Creating network '%s'...", self.RALLY_PRIVATE_NET_NAME)
        self.network_dict = os_utils.create_shared_network_full(
            self.RALLY_PRIVATE_NET_NAME,
            self.RALLY_PRIVATE_SUBNET_NAME,
            self.RALLY_ROUTER_NAME,
            self.RALLY_PRIVATE_SUBNET_CIDR)
        if self.network_dict is None:
            raise Exception("Failed to create shared network '%s'" %
                            self.RALLY_PRIVATE_NET_NAME)

    def _run_tests(self):
        if self.test_name == 'all':
            for test in self.TESTS:
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

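    # Aggregate the per-scenario summaries collected by _get_output() into
    # an ASCII table for the log and into the payload stored as the testcase
    # details; the overall result is the mean of the per-scenario success
    # percentages.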
    def _generate_report(self):
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

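    # Only resources created by _prepare_env() are removed: the volume type
    # if one was created, and the Glance image unless it already existed
    # before the run. The shared rally network is left in place.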
    def _clean_up(self):
        if self.volume_type:
            LOGGER.debug("Deleting volume type '%s'...", self.volume_type)
            os_utils.delete_volume_type(self.cinder_client, self.volume_type)

        if not self.image_exists:
            LOGGER.debug("Deleting image '%s' with ID '%s'...",
                         self.GLANCE_IMAGE_NAME, self.image_id)
            if not os_utils.delete_glance_image(self.nova_client,
                                                self.image_id):
                LOGGER.error("Error deleting the glance image")

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            self._clean_up()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR

        self.stop_time = time.time()
        return res


class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_sanity"
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')


class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_full"
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
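

# Illustrative usage sketch (not part of the original module). These test
# cases are normally driven by the Functest test runner; assuming a sourced
# OpenStack RC file and a populated Functest configuration, a standalone run
# boils down to:
#
#     import logging
#     from functest.core import testcase
#     from functest.opnfv_tests.openstack.rally import rally
#
#     logging.basicConfig(level=logging.INFO)
#     rally_sanity = rally.RallySanity()
#     status = rally_sanity.run()
#     assert status == testcase.TestCase.EX_OK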