Merge "Add openssh-client in core"
[functest-xtesting.git] / functest / opnfv_tests / openstack / rally / rally.py
#!/usr/bin/env python
#
# Copyright (c) 2015 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#

"""Rally testcases implementation."""

from __future__ import division

import json
import logging
import os
import re
import subprocess
import time

import iniparse
import pkg_resources
import yaml

from functest.core import testcase
from functest.energy import energy
from functest.utils.constants import CONST
import functest.utils.openstack_utils as os_utils

LOGGER = logging.getLogger(__name__)

class RallyBase(testcase.OSGCTestCase):
    """Base class for Rally testcases implementation."""

    TESTS = ['authenticate', 'glance', 'cinder', 'heat', 'keystone',
             'neutron', 'nova', 'quotas', 'vm', 'all']
    GLANCE_IMAGE_NAME = CONST.__getattribute__('openstack_image_name')
    GLANCE_IMAGE_FILENAME = CONST.__getattribute__('openstack_image_file_name')
    GLANCE_IMAGE_PATH = os.path.join(
        CONST.__getattribute__('dir_functest_images'),
        GLANCE_IMAGE_FILENAME)
    GLANCE_IMAGE_FORMAT = CONST.__getattribute__('openstack_image_disk_format')
    FLAVOR_NAME = "m1.tiny"

    RALLY_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally')
    RALLY_SCENARIO_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario')
    TEMPLATE_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/templates')
    SUPPORT_DIR = pkg_resources.resource_filename(
        'functest', 'opnfv_tests/openstack/rally/scenario/support')
    USERS_AMOUNT = 2
    TENANTS_AMOUNT = 3
    ITERATIONS_AMOUNT = 10
    CONCURRENCY = 4
    RESULTS_DIR = os.path.join(CONST.__getattribute__('dir_results'), 'rally')
    TEMPEST_CONF_FILE = os.path.join(CONST.__getattribute__('dir_results'),
                                     'tempest/tempest.conf')
    BLACKLIST_FILE = os.path.join(RALLY_DIR, "blacklist.txt")
    TEMP_DIR = os.path.join(RALLY_DIR, "var")
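    # blacklist.txt lists the tests to skip; the blacklist-filtered copies of
    # the scenario files are written under var/ (TEMP_DIR) at runtime by
    # _prepare_test_list().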

    CINDER_VOLUME_TYPE_NAME = "volume_test"
    RALLY_PRIVATE_NET_NAME = CONST.__getattribute__('rally_network_name')
    RALLY_PRIVATE_SUBNET_NAME = CONST.__getattribute__('rally_subnet_name')
    RALLY_PRIVATE_SUBNET_CIDR = CONST.__getattribute__('rally_subnet_cidr')
    RALLY_ROUTER_NAME = CONST.__getattribute__('rally_router_name')

    def __init__(self, **kwargs):
        """Initialize RallyBase object."""
        super(RallyBase, self).__init__(**kwargs)
        self.mode = ''
        self.summary = []
        self.scenario_dir = ''
        self.nova_client = os_utils.get_nova_client()
        self.neutron_client = os_utils.get_neutron_client()
        self.cinder_client = os_utils.get_cinder_client()
        self.network_dict = {}
        self.volume_type = None
        self.smoke = None
        self.test_name = None
        self.image_exists = None
        self.image_id = None
        self.start_time = None
        self.result = None
        self.details = None

    def _build_task_args(self, test_file_name):
        """Build the dict of arguments passed to the rally task."""
        task_args = {'service_list': [test_file_name]}
        task_args['image_name'] = self.GLANCE_IMAGE_NAME
        task_args['flavor_name'] = self.FLAVOR_NAME
        task_args['glance_image_location'] = self.GLANCE_IMAGE_PATH
        task_args['glance_image_format'] = self.GLANCE_IMAGE_FORMAT
        task_args['tmpl_dir'] = self.TEMPLATE_DIR
        task_args['sup_dir'] = self.SUPPORT_DIR
        task_args['users_amount'] = self.USERS_AMOUNT
        task_args['tenants_amount'] = self.TENANTS_AMOUNT
        task_args['use_existing_users'] = False
        task_args['iterations'] = self.ITERATIONS_AMOUNT
        task_args['concurrency'] = self.CONCURRENCY
        task_args['smoke'] = self.smoke

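        # Resolve the external network used for floating IPs and the shared
        # private network created in _prepare_env(); rally receives empty
        # strings when they cannot be determined.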
        ext_net = os_utils.get_external_net(self.neutron_client)
        if ext_net:
            task_args['floating_network'] = str(ext_net)
        else:
            task_args['floating_network'] = ''

        net_id = self.network_dict['net_id']
        if net_id:
            task_args['netid'] = str(net_id)
        else:
            task_args['netid'] = ''

        return task_args

    def _prepare_test_list(self, test_name):
        """Build the blacklist-filtered scenario file and return its path."""
        test_yaml_file_name = 'opnfv-{}.yaml'.format(test_name)
        scenario_file_name = os.path.join(self.RALLY_SCENARIO_DIR,
                                          test_yaml_file_name)

        if not os.path.exists(scenario_file_name):
            scenario_file_name = os.path.join(self.scenario_dir,
                                              test_yaml_file_name)

            if not os.path.exists(scenario_file_name):
                raise Exception("The scenario '%s' does not exist."
                                % scenario_file_name)

        LOGGER.debug('Scenario fetched from: %s', scenario_file_name)
        test_file_name = os.path.join(self.TEMP_DIR, test_yaml_file_name)

        if not os.path.exists(self.TEMP_DIR):
            os.makedirs(self.TEMP_DIR)

        self.apply_blacklist(scenario_file_name, test_file_name)
        return test_file_name

    @staticmethod
    def get_task_id(cmd_raw):
        """
        Get the task id from the output of the rally command.

        :param cmd_raw: raw output of the 'rally task start' command
        :return: task_id as string, or None if no task id was found
        """
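        # rally announces a new task with a line matching
        # "Task <id>: started"; capture the id from the first such line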
        taskid_re = re.compile('^Task +(.*): started$')
        for line in cmd_raw.splitlines(True):
            line = line.strip()
            match = taskid_re.match(line)
            if match:
                return match.group(1)
        return None

    @staticmethod
    def task_succeed(json_raw):
        """
        Parse the JSON results of a rally task.

        :param json_raw: raw JSON output of 'rally task results'
        :return: True if every iteration completed without error,
                 False otherwise
        """
        rally_report = json.loads(json_raw)
        for report in rally_report:
            if report is None or report.get('result') is None:
                return False

            for result in report.get('result'):
                if result is None or len(result.get('error')) > 0:
                    return False

        return True

    @staticmethod
    def live_migration_supported():
        """Determine if live migration is supported."""
        config = iniparse.ConfigParser()
        if (config.read(RallyBase.TEMPEST_CONF_FILE) and
                config.has_section('compute-feature-enabled') and
                config.has_option('compute-feature-enabled',
                                  'live_migration')):
            return config.getboolean('compute-feature-enabled',
                                     'live_migration')

        return False

    @staticmethod
    def get_cmd_output(proc):
        """Get command stdout."""
        result = ""
        while proc.poll() is None:
            line = proc.stdout.readline()
            result += line
        return result

    @staticmethod
    def excl_scenario():
        """Return the tests excluded for the current installer and scenario."""
        black_tests = []
        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

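            # each 'scenario' entry of blacklist.txt pairs installer and
            # deploy-scenario patterns with the tests to exclude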
            installer_type = CONST.__getattribute__('INSTALLER_TYPE')
            deploy_scenario = CONST.__getattribute__('DEPLOY_SCENARIO')
            if (bool(installer_type) and bool(deploy_scenario) and
                    'scenario' in black_list_yaml.keys()):
                for item in black_list_yaml['scenario']:
                    scenarios = item['scenarios']
                    installers = item['installers']
                    in_it = RallyBase.in_iterable_re
                    if (in_it(deploy_scenario, scenarios) and
                            in_it(installer_type, installers)):
                        tests = item['tests']
                        black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Scenario exclusion not applied.")

        return black_tests

    @staticmethod
    def in_iterable_re(needle, haystack):
        """
        Check if given needle is in the iterable haystack, using regex.

        :param needle: string to be matched
        :param haystack: iterable of strings (optionally regex patterns)
        :return: True if needle is equal to any of the elements in haystack,
                 or if a nonempty regex pattern in haystack is found in needle.
        """
        # match without regex
        if needle in haystack:
            return True

        for pattern in haystack:
            # match if a regex pattern is set and found in the needle
            if pattern and re.search(pattern, needle) is not None:
                return True

        return False

    @staticmethod
    def excl_func():
        """Return the tests excluded for unsupported functionalities."""
        black_tests = []
        func_list = []

        try:
            with open(RallyBase.BLACKLIST_FILE, 'r') as black_list_file:
                black_list_yaml = yaml.safe_load(black_list_file)

            if not RallyBase.live_migration_supported():
                func_list.append("no_live_migration")

            if 'functionality' in black_list_yaml.keys():
                for item in black_list_yaml['functionality']:
                    functions = item['functions']
                    for func in func_list:
                        if func in functions:
                            tests = item['tests']
                            black_tests.extend(tests)
        except Exception:  # pylint: disable=broad-except
            LOGGER.debug("Functionality exclusion not applied.")

        return black_tests

    @staticmethod
    def apply_blacklist(case_file_name, result_file_name):
        """Apply blacklist."""
        LOGGER.debug("Applying blacklist...")
        cases_file = open(case_file_name, 'r')
        result_file = open(result_file_name, 'w')

        black_tests = list(set(RallyBase.excl_func() +
                               RallyBase.excl_scenario()))

        if black_tests:
            LOGGER.debug("Blacklisted tests: %s", black_tests)

        include = True
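        # copy the scenario file line by line, skipping every block whose
        # header matches a blacklisted pattern; a blank line ends the block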
        for cases_line in cases_file:
            if include:
                for black_tests_line in black_tests:
                    if re.search(black_tests_line,
                                 cases_line.strip().rstrip(':')):
                        include = False
                        break
                else:
                    result_file.write(str(cases_line))
            else:
                if cases_line.isspace():
                    include = True

        cases_file.close()
        result_file.close()

    @staticmethod
    def file_is_empty(file_name):
        """Determine if a file is empty."""
        try:
            if os.stat(file_name).st_size > 0:
                return False
        except Exception:  # pylint: disable=broad-except
            pass

        return True

    def _run_task(self, test_name):
        """Run a rally task for the given test scenario."""
        LOGGER.info('Starting test scenario "%s" ...', test_name)

        task_file = os.path.join(self.RALLY_DIR, 'task.yaml')
        if not os.path.exists(task_file):
            LOGGER.error("Task file '%s' does not exist.", task_file)
            raise Exception("Task file '%s' does not exist." % task_file)

        file_name = self._prepare_test_list(test_name)
        if self.file_is_empty(file_name):
            LOGGER.info('No tests for scenario "%s"', test_name)
            return

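        # start the rally task; --abort-on-sla-failure stops the run as soon
        # as an SLA criterion fails, and the scenario arguments are rendered
        # through --task-args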
        cmd_line = ("rally task start --abort-on-sla-failure "
                    "--task {0} "
                    "--task-args \"{1}\""
                    .format(task_file, self._build_task_args(test_name)))
        LOGGER.debug('running command line: %s', cmd_line)

        proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = self._get_output(proc, test_name)
        task_id = self.get_task_id(output)
        LOGGER.debug('task_id : %s', task_id)

        if task_id is None:
            LOGGER.error('Failed to retrieve task_id, validating task...')
            cmd_line = ("rally task validate "
                        "--task {0} "
                        "--task-args \"{1}\""
                        .format(task_file, self._build_task_args(test_name)))
            LOGGER.debug('running command line: %s', cmd_line)
            proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
            output = self.get_cmd_output(proc)
            LOGGER.error("Task validation result:\n%s", output)
            return

        # check for result directory and create it otherwise
        if not os.path.exists(self.RESULTS_DIR):
            LOGGER.debug('%s does not exist, we create it.',
                         self.RESULTS_DIR)
            os.makedirs(self.RESULTS_DIR)

        # write html report file
        report_html_name = 'opnfv-{}.html'.format(test_name)
        report_html_dir = os.path.join(self.RESULTS_DIR, report_html_name)
        cmd_line = "rally task report {} --out {}".format(task_id,
                                                          report_html_dir)

        LOGGER.debug('running command line: %s', cmd_line)
        os.popen(cmd_line)

        # get and save rally operation JSON result
        cmd_line = "rally task results %s" % task_id
        LOGGER.debug('running command line: %s', cmd_line)
        cmd = os.popen(cmd_line)
        json_results = cmd.read()
        report_json_name = 'opnfv-{}.json'.format(test_name)
        report_json_dir = os.path.join(self.RESULTS_DIR, report_json_name)
        with open(report_json_dir, 'w') as r_file:
            LOGGER.debug('saving json file')
            r_file.write(json_results)

        # parse JSON operation result
        if self.task_succeed(json_results):
            LOGGER.info('Test scenario: "%s" OK.\n', test_name)
        else:
            LOGGER.info('Test scenario: "%s" Failed.\n', test_name)

    def _get_output(self, proc, test_name):
        """Collect the rally output and build the per-scenario summary."""
        result = ""
        nb_tests = 0
        overall_duration = 0.0
        success = 0.0
        nb_totals = 0

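        # stream the rally output live: keep the lines that form the
        # per-scenario report and count the table rows to build the summary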
        while proc.poll() is None:
            line = proc.stdout.readline()
            if ("Load duration" in line or
                    "started" in line or
                    "finished" in line or
                    " Preparing" in line or
                    "+-" in line or
                    "|" in line):
                result += line
            elif "test scenario" in line:
                result += "\n" + line
            elif "Full duration" in line:
                result += line + "\n\n"

            # parse output for summary report
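            # a data row is any table line that is not a header, per-action
            # or totals row; the "| total" row carries the success percentage
            # in its 9th '|'-separated field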
            if ("| " in line and
                    "| action" not in line and
                    "| Starting" not in line and
                    "| Completed" not in line and
                    "| ITER" not in line and
                    "|   " not in line and
                    "| total" not in line):
                nb_tests += 1
            elif "| total" in line:
                percentage = ((line.split('|')[8]).strip(' ')).strip('%')
                try:
                    success += float(percentage)
                except ValueError:
                    LOGGER.info('Percentage error: %s, %s',
                                percentage, line)
                nb_totals += 1
            elif "Full duration" in line:
                duration = line.split(': ')[1]
                try:
                    overall_duration += float(duration)
                except ValueError:
                    LOGGER.info('Duration error: %s, %s', duration, line)

        overall_duration = "{:10.2f}".format(overall_duration)
        if nb_totals == 0:
            success_avg = 0
        else:
            success_avg = "{:0.2f}".format(success / nb_totals)

        scenario_summary = {'test_name': test_name,
                            'overall_duration': overall_duration,
                            'nb_tests': nb_tests,
                            'success': success_avg}
        self.summary.append(scenario_summary)

        LOGGER.debug("\n" + result)

        return result

    def _prepare_env(self):
        """Check the test name and create the resources used by the tests."""
        LOGGER.debug('Validating the test name...')
        if self.test_name not in self.TESTS:
            raise Exception("Test name '%s' is invalid" % self.test_name)

        volume_types = os_utils.list_volume_types(self.cinder_client,
                                                  private=False)
        if volume_types:
            LOGGER.debug("Using existing volume type(s)...")
        else:
            LOGGER.debug('Creating volume type...')
            self.volume_type = os_utils.create_volume_type(
                self.cinder_client, self.CINDER_VOLUME_TYPE_NAME)
            if self.volume_type is None:
                raise Exception("Failed to create volume type '%s'" %
                                self.CINDER_VOLUME_TYPE_NAME)
            LOGGER.debug("Volume type '%s' is created successfully.",
                         self.CINDER_VOLUME_TYPE_NAME)

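        # get_or_create_image() reports whether the image already existed so
        # that _clean_up() only deletes images created by this run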
        LOGGER.debug('Getting or creating image...')
        self.image_exists, self.image_id = os_utils.get_or_create_image(
            self.GLANCE_IMAGE_NAME,
            self.GLANCE_IMAGE_PATH,
            self.GLANCE_IMAGE_FORMAT)
        if self.image_id is None:
            raise Exception("Failed to get or create image '%s'" %
                            self.GLANCE_IMAGE_NAME)

        LOGGER.debug("Creating network '%s'...", self.RALLY_PRIVATE_NET_NAME)
        self.network_dict = os_utils.create_shared_network_full(
            self.RALLY_PRIVATE_NET_NAME,
            self.RALLY_PRIVATE_SUBNET_NAME,
            self.RALLY_ROUTER_NAME,
            self.RALLY_PRIVATE_SUBNET_CIDR)
        if self.network_dict is None:
            raise Exception("Failed to create shared network '%s'" %
                            self.RALLY_PRIVATE_NET_NAME)

    def _run_tests(self):
        """Run the selected scenario, or every scenario when 'all' is set."""
        if self.test_name == 'all':
            for test in self.TESTS:
                if test in ('all', 'vm'):
                    continue
                self._run_task(test)
        else:
            self._run_task(self.test_name)

    def _generate_report(self):
        """Build the summary table, log it and store the details payload."""
        report = (
            "\n"
            "                                                              "
            "\n"
            "                     Rally Summary Report\n"
            "\n"
            "+===================+============+===============+===========+"
            "\n"
            "| Module            | Duration   | nb. Test Run  | Success   |"
            "\n"
            "+===================+============+===============+===========+"
            "\n")
        payload = []

        # for each scenario we draw a row for the table
        total_duration = 0.0
        total_nb_tests = 0
        total_success = 0.0
        for item in self.summary:
            name = "{0:<17}".format(item['test_name'])
            duration = float(item['overall_duration'])
            total_duration += duration
            duration = time.strftime("%M:%S", time.gmtime(duration))
            duration = "{0:<10}".format(duration)
            nb_tests = "{0:<13}".format(item['nb_tests'])
            total_nb_tests += int(item['nb_tests'])
            success = "{0:<10}".format(str(item['success']) + '%')
            total_success += float(item['success'])
            report += ("" +
                       "| " + name + " | " + duration + " | " +
                       nb_tests + " | " + success + "|\n" +
                       "+-------------------+------------"
                       "+---------------+-----------+\n")
            payload.append({'module': name,
                            'details': {'duration': item['overall_duration'],
                                        'nb tests': item['nb_tests'],
                                        'success': item['success']}})

        total_duration_str = time.strftime("%H:%M:%S",
                                           time.gmtime(total_duration))
        total_duration_str2 = "{0:<10}".format(total_duration_str)
        total_nb_tests_str = "{0:<13}".format(total_nb_tests)

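        # the overall result is the average of the per-scenario success
        # rates; an empty summary (nothing ran) counts as a 100% success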
        try:
            self.result = total_success / len(self.summary)
        except ZeroDivisionError:
            self.result = 100

        success_rate = "{:0.2f}".format(self.result)
        success_rate_str = "{0:<10}".format(str(success_rate) + '%')
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"
        report += ("| TOTAL:            | " + total_duration_str2 + " | " +
                   total_nb_tests_str + " | " + success_rate_str + "|\n")
        report += ("+===================+============"
                   "+===============+===========+")
        report += "\n"

        LOGGER.info("\n" + report)
        payload.append({'summary': {'duration': total_duration,
                                    'nb tests': total_nb_tests,
                                    'nb success': success_rate}})

        self.details = payload

        LOGGER.info("Rally '%s' success_rate is %s%%",
                    self.case_name, success_rate)

    def _clean_up(self):
        """Delete the volume type and image created for this run."""
        if self.volume_type:
            LOGGER.debug("Deleting volume type '%s'...", self.volume_type)
            os_utils.delete_volume_type(self.cinder_client, self.volume_type)

        if not self.image_exists:
            LOGGER.debug("Deleting image '%s' with ID '%s'...",
                         self.GLANCE_IMAGE_NAME, self.image_id)
            if not os_utils.delete_glance_image(self.nova_client,
                                                self.image_id):
                LOGGER.error("Error deleting the glance image")

    @energy.enable_recording
    def run(self, **kwargs):
        """Run testcase."""
        self.start_time = time.time()
        try:
            self._prepare_env()
            self._run_tests()
            self._generate_report()
            self._clean_up()
            res = testcase.TestCase.EX_OK
        except Exception as exc:   # pylint: disable=broad-except
            LOGGER.error('Error with run: %s', exc)
            res = testcase.TestCase.EX_RUN_ERROR

        self.stop_time = time.time()
        return res


class RallySanity(RallyBase):
    """Rally sanity testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallySanity object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_sanity"
        super(RallySanity, self).__init__(**kwargs)
        self.mode = 'sanity'
        self.test_name = 'all'
        self.smoke = True
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'sanity')


class RallyFull(RallyBase):
    """Rally full testcase implementation."""

    def __init__(self, **kwargs):
        """Initialize RallyFull object."""
        if "case_name" not in kwargs:
            kwargs["case_name"] = "rally_full"
        super(RallyFull, self).__init__(**kwargs)
        self.mode = 'full'
        self.test_name = 'all'
        self.smoke = False
        self.scenario_dir = os.path.join(self.RALLY_SCENARIO_DIR, 'full')
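

# Illustrative usage sketch, not part of the original module: it only shows
# how the classes above can be driven directly. It assumes OpenStack
# credentials are sourced and the functest configuration read through CONST
# is available in the environment.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    RALLY_SANITY = RallySanity()
    if RALLY_SANITY.run() == testcase.TestCase.EX_OK:
        LOGGER.info("rally_sanity success rate: %s%%", RALLY_SANITY.result)
    else:
        LOGGER.error("rally_sanity could not run to completion")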